diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 1293b3e5332..a5c26141117 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -26,6 +26,9 @@ - [ ] My change requires a change to the documentation. - [ ] My name is in the list of CITATION.cff +- [ ] I agree that PEcAn Project may distribute my contribution under any or all of + - the same license as the existing code, + - and/or the BSD 3-clause license. - [ ] I have updated the CHANGELOG.md. - [ ] I have updated the documentation accordingly. - [ ] I have read the **CONTRIBUTING** document. diff --git a/.github/labeler.yml b/.github/labeler.yml new file mode 100644 index 00000000000..f3ca5a77c40 --- /dev/null +++ b/.github/labeler.yml @@ -0,0 +1,57 @@ +# +# Add project labels to PRs +# Invocation is done by .github/workflows/prlabeler.yml + +# changes in the documentation +'Documentation': +- changed-files: + - any-glob-to-any-file: 'book_source/**' + - any-glob-to-any-file: 'documentation/**' + - any-glob-to-any-file: 'CONTRIBUTING.md' + - any-glob-to-any-file: 'DEBUGING.md' + - any-glob-to-any-file: 'DEV-INTRO.md' + - any-glob-to-any-file: 'README.md' + +# Add 'Dockerfile' label to any changes in the docker directory +'Dockerfile': +- changed-files: + - any-glob-to-any-file: 'docker/**' + + +# Add 'Website' label to any changes in the web directory +'Website': +- changed-files: + - any-glob-to-any-file: 'web/**' + +# Add 'Base' label to any changes in the base directory +'Base': +- changed-files: + - any-glob-to-any-file: 'base/**' + +# Add 'Models' label to any changes in the models directory +'Models': +- changed-files: + - any-glob-to-any-file: 'models/**' + +# Add 'Modules' label to any changes in the modules directory +'Modules': +- changed-files: + - any-glob-to-any-file: 'modules/**' + +# Add 'GitHub Actions' label to any changes in the .github/workflows directory +'GitHub Actions': +- changed-files: + - any-glob-to-any-file: '.github/workflows/**' + +# Add 'Scripts' label to any changes in the scripts directory + +'Scripts': +- changed-files: + - any-glob-to-any-file: 'scripts/**' + +# Add 'Tests' label to any changes in the tests directory +'Tests': +- all: + - changed-files: + - any-glob-to-any-file: ['tests/**', '**/tests/**'] + - any-glob-to-any-file: '!**/tests/Rcheck_reference.log' diff --git a/.github/settings.yml b/.github/settings.yml new file mode 100644 index 00000000000..a7d87f7d2f9 --- /dev/null +++ b/.github/settings.yml @@ -0,0 +1,30 @@ +#Define the colour of labels over here + +labels: + + - name: "Documentation" + color: a2dcf2 + + - name: "Dockerfile" + color: 0052CC + + - name: "Website" + color: 84b6eb + + - name: "Base" + color: 1ED626 + + - name: "Models" + color: C5DEF5 + + - name: "Modules" + color: FBCA04 + + - name: "GitHub Actions" + color: 84b6eb + + - name: "Scripts" + color: 3B8924 + + - name: "Tests" + color: ff8c00 diff --git a/.github/workflows/book.yml b/.github/workflows/book.yml index bd4df36be03..14e14c75d06 100644 --- a/.github/workflows/book.yml +++ b/.github/workflows/book.yml @@ -3,15 +3,13 @@ name: renderbook on: push: branches: - - master + - main - develop - tags: - '*' - pull_request: - merge_group: + workflow_dispatch: jobs: bookdown: @@ -24,11 +22,13 @@ jobs: steps: # checkout source code - - uses: actions/checkout@v3 - # install rmarkdown - - name: Install rmarkdown + - uses: actions/checkout@v4 + # install bookdown + - name: Install bookdown run: | - Rscript -e 'install.packages(c("rmarkdown","bookdown"))' + 
Rscript \ + -e 'repos <- c(getOption("repos"), sub(r"(\d{4}-\d{2}-\d{2})", "latest", getOption("repos")))' \ + -e 'remotes::install_version("bookdown", ">= 0.31", dependencies = TRUE, upgrade = FALSE, repos = repos)' # copy files - name: copy extfiles run: | @@ -52,22 +52,25 @@ jobs: path: book_source/_book/ # download documentation repo - name: Checkout documentation repo - if: github.event_name != 'pull_request' - uses: actions/checkout@v3 + if: github.event_name == 'push' + uses: actions/checkout@v4 with: repository: ${{ github.repository_owner }}/pecan-documentation path: pecan-documentation token: ${{ secrets.GH_PAT }} # upload new documentation - name: publish to github - if: github.event_name != 'pull_request' + if: github.event_name == 'push' run: | git config --global user.email "pecanproj@gmail.com" git config --global user.name "GitHub Documentation Robot" - export VERSION=${GITHUB_REF##*/} + export VERSION=$(echo $GITHUB_REF | sed 's,.*/,,' ) + if [ "$VERSION" = "main" ]; then + export VERSION=latest + fi cd pecan-documentation mkdir -p $VERSION rsync -a --delete ../book_source/_book/ ${VERSION}/ git add --all * git commit -m "Build book from pecan revision ${GITHUB_SHA}" || true - git push -q origin master + git push -q origin main diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml new file mode 100644 index 00000000000..4e40e19ce5f --- /dev/null +++ b/.github/workflows/check.yml @@ -0,0 +1,69 @@ +name: check + +on: + workflow_call: + inputs: + R-version: + required: true + type: string + make-grouping: + required: true + type: string + +env: + R_LIBS_USER: /usr/local/lib/R/site-library + LC_ALL: en_US.UTF-8 + NCPUS: 2 + PGHOST: postgres + CI: true + +jobs: + check: + runs-on: ubuntu-latest + + env: + GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} + _R_CHECK_LENGTH_1_CONDITION_: true + _R_CHECK_LENGTH_1_LOGIC2_: true + # Avoid compilation check warnings that come from the system Makevars + # See https://stat.ethz.ch/pipermail/r-package-devel/2019q2/003898.html + _R_CHECK_COMPILATION_FLAGS_KNOWN_: -Wformat -Werror=format-security -Wdate-time + # Keep R checks from trying to consult the very flaky worldclockapi.com + _R_CHECK_SYSTEM_CLOCK_: 0 + + container: + image: pecan/depends:R${{ inputs.R-version }} + + steps: + # checkout source code + - name: work around https://github.com/actions/checkout/issues/766 + run: git config --global --add safe.directory "$GITHUB_WORKSPACE" + - uses: actions/checkout@v4 + with: + set-safe-directory: false + + # Forbid spaces in names. Yes, *we* know it's not 1980 anymore, but Make doesn't. + - name: check for filenames that would confuse Make + run: | + SPACENAMES=`find . -name '* *'` + if [ -n "$SPACENAMES" ]; then + echo "::error file=${SPACENAMES}::Spaces in filename(s): ${SPACENAMES}. Please rename these files by converting spaces to underscores." 
+ exit 1 + fi + + # install additional tools needed + - name: install utils + run: apt-get update && apt-get install -y postgresql-client qpdf + - name: install new dependencies + run: Rscript scripts/generate_dependencies.R && cd docker/depends && Rscript pecan.depends.R + + # run PEcAn checks + # The package names of base, modules, and models are passed as matrix variables to avoid repeatability of code + - name: check + run: make -j1 ${{ inputs.make-grouping }} + env: + REBUILD_DOCS: "FALSE" + RUN_TESTS: "FALSE" + + - name: check for out-of-date files + uses: infotroph/tree-is-clean@v1 diff --git a/.github/workflows/ci-weekly.yml b/.github/workflows/ci-weekly.yml index 09ad97e300c..36bdebe6950 100644 --- a/.github/workflows/ci-weekly.yml +++ b/.github/workflows/ci-weekly.yml @@ -3,179 +3,44 @@ name: Weekly Checks on: # every Monday at 4:30 AM # (hopefully after the 1:30 AM `depends` build has completed) - schedule: + schedule: - cron: '30 4 * * 1' - -env: - R_LIBS_USER: /usr/local/lib/R/site-library - LC_ALL: en_US.UTF-8 - NCPUS: 2 - PGHOST: postgres - CI: true + workflow_dispatch: jobs: - # ---------------------------------------------------------------------- - # R TEST - # ---------------------------------------------------------------------- test: - if: github.event_name != 'issue_comment' || startsWith(github.event.comment.body, '/build') - runs-on: ubuntu-latest - env: - GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} - strategy: fail-fast: false matrix: R: + - "4.3" - "devel" + uses: ./.github/workflows/test.yml + with: + R-version: ${{ matrix.R }} + secrets: inherit - services: - postgres: - image: mdillon/postgis:9.5 - options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5 - - container: - image: pecan/depends:R${{ matrix.R }} - - steps: - # checkout source code - - name: work around https://github.com/actions/checkout/issues/766 - run: git config --global --add safe.directory "$GITHUB_WORKSPACE" - - uses: actions/checkout@v3 - with: - set-safe-directory: false - - # install additional tools needed - - name: install utils - run: apt-get update && apt-get install -y postgresql-client qpdf - - name: update dependency lists - run: Rscript scripts/generate_dependencies.R - - name: check for out-of-date dependencies files - uses: infotroph/tree-is-clean@v1 - - name: install newly-added dependencies - run: Rscript docker/depends/pecan.depends.R - - # initialize database - - name: db setup - uses: docker://pecan/db:ci - - name: add models to db - run: ./scripts/add.models.sh - - # run PEcAn tests - - name: test - run: make -j1 test - - name: check for out-of-date files - uses: infotroph/tree-is-clean@v1 - - # ---------------------------------------------------------------------- - # R CHECK - # ---------------------------------------------------------------------- check: - if: github.event_name != 'issue_comment' || startsWith(github.event.comment.body, '/build') - runs-on: ubuntu-latest - strategy: fail-fast: false matrix: R: + - "4.3" - "devel" + uses: ./.github/workflows/check.yml + with: + R-version: ${{ matrix.R }} + make-grouping: "check" + secrets: inherit - env: - GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} - _R_CHECK_LENGTH_1_CONDITION_: true - _R_CHECK_LENGTH_1_LOGIC2_: true - # Avoid compilation check warnings that come from the system Makevars - # See https://stat.ethz.ch/pipermail/r-package-devel/2019q2/003898.html - _R_CHECK_COMPILATION_FLAGS_KNOWN_: -Wformat -Werror=format-security -Wdate-time - # Keep R checks from trying to consult the very 
flaky worldclockapi.com - _R_CHECK_SYSTEM_CLOCK_: 0 - - container: - image: pecan/depends:R${{ matrix.R }} - - steps: - # checkout source code - - name: work around https://github.com/actions/checkout/issues/766 - run: git config --global --add safe.directory "$GITHUB_WORKSPACE" - - uses: actions/checkout@v3 - with: - set-safe-directory: false - - # install additional tools needed - - name: install utils - run: apt-get update && apt-get install -y postgresql-client qpdf - - name: install new dependencies - run: Rscript scripts/generate_dependencies.R && Rscript docker/depends/pecan.depends.R - - # run PEcAn checks - - name: check - run: make -j1 check - env: - REBUILD_DOCS: "FALSE" - RUN_TESTS: "FALSE" - - name: check for out-of-date files - uses: infotroph/tree-is-clean@v1 - - - # ---------------------------------------------------------------------- - # SIPNET TESTS - # ---------------------------------------------------------------------- sipnet: - if: github.event_name != 'issue_comment' || startsWith(github.event.comment.body, '/build') - runs-on: ubuntu-latest - env: - GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} - strategy: fail-fast: false matrix: R: + - "4.3" - "devel" - - services: - postgres: - image: mdillon/postgis:9.5 - options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5 - - container: - image: pecan/depends:R${{ matrix.R }} - - steps: - # checkout source code - - name: work around https://github.com/actions/checkout/issues/766 - run: git config --global --add safe.directory "$GITHUB_WORKSPACE" - - uses: actions/checkout@v3 - with: - set-safe-directory: false - - # install additional tools needed - - name: install utils - run: apt-get update && apt-get install -y postgresql-client qpdf - - name: install new dependencies - run: Rscript scripts/generate_dependencies.R && Rscript docker/depends/pecan.depends.R - - # initialize database - - name: db setup - uses: docker://pecan/db:ci - - name: add models to db - run: ./scripts/add.models.sh - - # install sipnet - - name: Check out SIPNET - uses: actions/checkout@v3 - with: - repository: PecanProject/sipnet - path: sipnet - set-safe-directory: false - - name: install sipnet - run: | - cd ${GITHUB_WORKSPACE}/sipnet - make - - # compile PEcAn code - - name: build - run: make -j1 - - # run SIPNET test - - name: integration test - run: ./tests/integration.sh ghaction + uses: ./.github/workflows/sipnet.yml + with: + R-version: ${{ matrix.R }} + secrets: inherit diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3b84bd447c3..24ef77419b5 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -3,19 +3,16 @@ name: CI on: push: branches: - - master + - main - develop - tags: - - '*' - + - "v*.*.*" pull_request: - merge_group: - issue_comment: types: - created + workflow_dispatch: env: R_LIBS_USER: /usr/local/lib/R/site-library @@ -25,180 +22,43 @@ env: CI: true jobs: - # ---------------------------------------------------------------------- - # R TEST - # ---------------------------------------------------------------------- test: if: github.event_name != 'issue_comment' || startsWith(github.event.comment.body, '/build') - runs-on: ubuntu-latest - env: - GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} - strategy: fail-fast: false matrix: R: - - "4.0" + - "4.2" - "4.1" + uses: ./.github/workflows/test.yml + with: + R-version: ${{ matrix.R }} + secrets: inherit - services: - postgres: - image: mdillon/postgis:9.5 - options: --health-cmd pg_isready --health-interval 10s 
--health-timeout 5s --health-retries 5 - - container: - image: pecan/depends:R${{ matrix.R }} - - steps: - # checkout source code - - name: work around https://github.com/actions/checkout/issues/766 - run: git config --global --add safe.directory "$GITHUB_WORKSPACE" - - uses: actions/checkout@v3 - with: - set-safe-directory: false - - # install additional tools needed - - name: install utils - run: apt-get update && apt-get install -y postgresql-client qpdf - - name: update dependency lists - run: Rscript scripts/generate_dependencies.R - - name: check for out-of-date dependencies files - uses: infotroph/tree-is-clean@v1 - - name: install newly-added dependencies - run: Rscript docker/depends/pecan.depends.R - - # initialize database - - name: db setup - uses: docker://pecan/db:ci - - name: add models to db - run: ./scripts/add.models.sh - - # run PEcAn tests - - name: test - run: make -j1 test - - name: check for out-of-date files - uses: infotroph/tree-is-clean@v1 - - # ---------------------------------------------------------------------- - # R CHECK - # ---------------------------------------------------------------------- check: if: github.event_name != 'issue_comment' || startsWith(github.event.comment.body, '/build') - runs-on: ubuntu-latest - strategy: fail-fast: false matrix: + package: [check_base, check_modules, check_models] R: - - "4.0" + - "4.2" - "4.1" + uses: ./.github/workflows/check.yml + with: + R-version: ${{ matrix.R }} + make-grouping: ${{ matrix.package }} + secrets: inherit - env: - GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} - _R_CHECK_LENGTH_1_CONDITION_: true - _R_CHECK_LENGTH_1_LOGIC2_: true - # Avoid compilation check warnings that come from the system Makevars - # See https://stat.ethz.ch/pipermail/r-package-devel/2019q2/003898.html - _R_CHECK_COMPILATION_FLAGS_KNOWN_: -Wformat -Werror=format-security -Wdate-time - # Keep R checks from trying to consult the very flaky worldclockapi.com - _R_CHECK_SYSTEM_CLOCK_: 0 - - container: - image: pecan/depends:R${{ matrix.R }} - - steps: - # checkout source code - - name: work around https://github.com/actions/checkout/issues/766 - run: git config --global --add safe.directory "$GITHUB_WORKSPACE" - - uses: actions/checkout@v3 - with: - set-safe-directory: false - - # Forbid spaces in names. Yes, *we* know it's not 1980 anymore, but Make doesn't. - - name: check for filenames that would confuse Make - run: | - SPACENAMES=`find . -name '* *'` - if [ -n "$SPACENAMES" ]; then - echo "::error file=${SPACENAMES}::Spaces in filename(s): ${SPACENAMES}. Please rename these files by converting spaces to underscores." 
- exit 1 - fi - - # install additional tools needed - - name: install utils - run: apt-get update && apt-get install -y postgresql-client qpdf - - name: install new dependencies - run: Rscript scripts/generate_dependencies.R && Rscript docker/depends/pecan.depends.R - - # run PEcAn checks - - name: check - run: make -j1 check - env: - REBUILD_DOCS: "FALSE" - RUN_TESTS: "FALSE" - - name: check for out-of-date files - uses: infotroph/tree-is-clean@v1 - - - # ---------------------------------------------------------------------- - # SIPNET TESTS - # ---------------------------------------------------------------------- sipnet: if: github.event_name != 'issue_comment' || startsWith(github.event.comment.body, '/build') - runs-on: ubuntu-latest - env: - GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} - strategy: fail-fast: false matrix: R: - - "4.0" + - "4.2" - "4.1" - - services: - postgres: - image: mdillon/postgis:9.5 - options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5 - - container: - image: pecan/depends:R${{ matrix.R }} - - steps: - # checkout source code - - name: work around https://github.com/actions/checkout/issues/766 - run: git config --global --add safe.directory "$GITHUB_WORKSPACE" - - uses: actions/checkout@v3 - with: - set-safe-directory: false - - # install additional tools needed - - name: install utils - run: apt-get update && apt-get install -y postgresql-client qpdf - - name: install new dependencies - run: Rscript scripts/generate_dependencies.R && Rscript docker/depends/pecan.depends.R - - # initialize database - - name: db setup - uses: docker://pecan/db:ci - - name: add models to db - run: ./scripts/add.models.sh - - # install sipnet - - name: Check out SIPNET - uses: actions/checkout@v3 - with: - repository: PecanProject/sipnet - path: sipnet - set-safe-directory: false - - name: install sipnet - run: | - cd ${GITHUB_WORKSPACE}/sipnet - make - - # compile PEcAn code - - name: build - run: make -j1 - - # run SIPNET test - - name: integration test - run: ./tests/integration.sh ghaction + uses: ./.github/workflows/sipnet.yml + with: + R-version: ${{ matrix.R }} + secrets: inherit diff --git a/.github/workflows/depends.yml b/.github/workflows/depends.yml deleted file mode 100644 index 4aabdddc531..00000000000 --- a/.github/workflows/depends.yml +++ /dev/null @@ -1,93 +0,0 @@ -name: Docker Depends Image - -on: - push: - branches: - - develop - - master - - # this runs on the develop branch - schedule: - - cron: '0 0 * * *' # midnight daily - - cron: '30 1 * * 1' # 1:30 AM every Monday (devel only) - -env: - # official supported version of R - SUPPORTED: 4.1 - DOCKERHUB_ORG: pecan - -jobs: - depends: - if: github.repository == 'PecanProject/pecan' - runs-on: ubuntu-latest - env: - GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} - - strategy: - fail-fast: false - matrix: - R: - - "4.0" - - "4.1" - - "devel" - - steps: - - name: Work around https://github.com/actions/checkout/issues/766 - run: git config --global --add safe.directory "$GITHUB_WORKSPACE" - - uses: actions/checkout@v3 - with: - set-safe-directory: false - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - with: - driver: docker - buildkitd-flags: --debug - install: true - - # calculate some variables that are used later - - name: github branch - # build Rdevel only on Mondays, others every day (but not twice on Mondays) - if: (matrix.R != 'devel' && github.event.schedule == '0 0 * * *') || (matrix.R == 'devel' && github.event.schedule == '30 1 * * 1') - run: | - 
BRANCH=${GITHUB_REF##*/} - echo "GITHUB_BRANCH=${BRANCH}" >> $GITHUB_ENV - - tags="R${{ matrix.R }}" - if [ "${{ matrix.R }}" == "${{ env.SUPPORTED }}" ]; then - if [ "$BRANCH" == "master" ]; then - tags="${tags},latest" - elif [ "$BRANCH" == "develop" ]; then - tags="${tags},develop" - fi - fi - echo "TAG=${tags}" >> $GITHUB_ENV - - # this will publish to the actor (person) github packages - - name: Publish to GitHub - if: env.TAG != '' - uses: elgohr/Publish-Docker-Github-Action@v5 - env: - R_VERSION: ${{ matrix.R }} - with: - name: ${{ github.repository_owner }}/pecan/depends - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - context: docker/depends - tags: "${{ env.TAG }}" - registry: docker.pkg.github.com - buildargs: R_VERSION - - # this will publish to the clowder dockerhub repo - - name: Publish to Docker Hub - if: env.TAG != '' - uses: elgohr/Publish-Docker-Github-Action@v5 - env: - R_VERSION: ${{ matrix.R }} - with: - name: ${{ env.DOCKERHUB_ORG }}/depends - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_PASSWORD }} - context: docker/depends - tags: "${{ env.TAG }}" - buildargs: R_VERSION diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index aa07fedd1eb..807e621f5d8 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -1,118 +1,615 @@ -name: Docker - -# initially we would us on: [release] as well, the problem is that -# the code in clowder would not know what branch the code is in, -# and would not set the right version flags. - -# This will run when: -# - when new code is pushed to master/develop to push the tags -# latest and develop -# - when a pull request is created and updated to make sure the -# Dockerfile is still valid. -# To be able to push to dockerhub, this expects the following -# secrets to be set in the project: -# - DOCKERHUB_USERNAME : username that can push to the org -# - DOCKERHUB_PASSWORD : password asscoaited with the username +name: Docker GHA + on: push: branches: - - master + - main - develop - + tags: + - "v*.*.*" pull_request: - merge_group: + workflow_dispatch: + inputs: + r_version: + description: 'R version to use' + required: true + type: choice + default: "4.1" + options: + - 4.1 + - 4.2 + - 4.3 + - 4.4 + - devel - issue_comment: - types: - - created - -# Certain actions will only run when this is the master repo. 
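+# Note: when this workflow is run manually (workflow_dispatch), the r_version
+# input above selects the R version passed to every image build; push,
+# pull_request, and tag events fall back to the default of 4.1 via the
+# `github.event.inputs.r_version || '4.1'` expression below.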
+# set up the environment, either input or default env: - MASTER_REPO: PecanProject/pecan - DOCKERHUB_ORG: pecan - GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} + R_VERSION: ${{ github.event.inputs.r_version || '4.1' }} +# there are 3 jobs to build different images jobs: - docker: - if: github.event_name != 'issue_comment' || startsWith(github.event.comment.body, '/build') + # ---------------------------------------------------------------------- + # depends image has all the dependencies installed + # ---------------------------------------------------------------------- + depends: runs-on: ubuntu-latest + permissions: + packages: write steps: - - name: Work around https://github.com/actions/checkout/issues/766 - run: git config --global --add safe.directory "$GITHUB_WORKSPACE" - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 + + # create metadata for image + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 with: - set-safe-directory: false + # list of Docker images to use as base name for tags + images: | + pecan/depends + ghcr.io/${{ github.repository_owner }}/${{ github.repository }}/depends + # generate Docker tags based on the following events/attributes + tags: | + type=schedule + type=ref,event=branch + type=ref,event=pr + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + type=semver,pattern={{major}} + + # setup docker build + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + id: buildx + uses: docker/setup-buildx-action@v3 + + - name: Inspect Builder + run: | + echo "Name: ${{ steps.buildx.outputs.name }}" + echo "Endpoint: ${{ steps.buildx.outputs.endpoint }}" + echo "Status: ${{ steps.buildx.outputs.status }}" + echo "Flags: ${{ steps.buildx.outputs.flags }}" + echo "Platforms: ${{ steps.buildx.outputs.platforms }}" + + # login to registries + - name: Login to DockerHub + uses: docker/login-action@v3 with: - driver: docker - buildkitd-flags: --debug - install: true + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} - # calculate some variables that are used later - - name: get version tag + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + # build the docker images + - name: Build and push depends + uses: docker/build-push-action@v6 + with: + context: docker/depends + file: docker/depends/Dockerfile + push: true + platforms: "linux/amd64" + cache-from: type=registry,ref=pecan/depends:buildcache + cache-to: type=registry,ref=pecan/depends:buildcache,mode=max + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + build-args: | + VERSION=${{ steps.meta.outputs.version }} + R_VERSION=${{ env.R_VERSION }} + GITHUB_PAT=${{ secrets.GITHUB_TOKEN }} + + # ---------------------------------------------------------------------- + # base image has PEcAn compiled and installed, and depends on depends + # ---------------------------------------------------------------------- + base: + runs-on: ubuntu-latest + needs: depends + permissions: + packages: write + + steps: + - uses: actions/checkout@v4 + + # create metadata for image + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + # list of Docker images to use as base name for tags + images: | + pecan/base + ghcr.io/${{ github.repository_owner }}/${{ github.repository }}/base + # generate Docker tags 
based on the following events/attributes + tags: | + type=schedule + type=ref,event=branch + type=ref,event=pr + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + type=semver,pattern={{major}} + + # setup docker build + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + id: buildx + uses: docker/setup-buildx-action@v3 + + - name: Inspect Builder run: | - BRANCH=${GITHUB_REF##*/} - echo "GITHUB_BRANCH=${BRANCH}" >> $GITHUB_ENV - if [ "$BRANCH" == "master" ]; then - version="$(awk '/Version:/ { print $2 }' base/all/DESCRIPTION)" - tags="latest" - oldversion="" - while [ "${oldversion}" != "${version}" ]; do - oldversion="${version}" - tags="${tags},${version}" - version=${version%.*} - done - echo "PECAN_VERSION=$(awk '/Version:/ { print $2 }' base/all/DESCRIPTION)" >> $GITHUB_ENV - echo "PECAN_TAGS=${tags}" >> $GITHUB_ENV - elif [ "$BRANCH" == "develop" ]; then - echo "PECAN_VERSION=develop" >> $GITHUB_ENV - echo "PECAN_TAGS=develop" >> $GITHUB_ENV - else - echo "PECAN_VERSION=develop" >> $GITHUB_ENV - echo "PECAN_TAGS=develop" >> $GITHUB_ENV - fi - - # use shell script to build, there is some complexity in this - - name: create images - run: ./docker.sh -i github - env: - PECAN_GIT_CHECKSUM: ${{ github.sha }} - PECAN_GIT_BRANCH: ${GITHUB_BRANCH} - VERSION: ${{ env.PECAN_VERSION }} - - # push all images to github - - name: Publish to GitHub - if: github.event_name == 'push' && github.repository == env.MASTER_REPO + echo "Name: ${{ steps.buildx.outputs.name }}" + echo "Endpoint: ${{ steps.buildx.outputs.endpoint }}" + echo "Status: ${{ steps.buildx.outputs.status }}" + echo "Flags: ${{ steps.buildx.outputs.flags }}" + echo "Platforms: ${{ steps.buildx.outputs.platforms }}" + + # login to registries + - name: Login to DockerHub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + # build the docker images + - name: Build and push base + uses: docker/build-push-action@v6 + with: + context: . 
+ file: docker/base/Dockerfile + push: true + platforms: "linux/amd64" + cache-from: type=registry,ref=pecan/base:buildcache + cache-to: type=registry,ref=pecan/base:buildcache,mode=max + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + build-args: | + VERSION=${{ steps.meta.outputs.version }} + R_VERSION=${{ env.R_VERSION }} + FROM_IMAGE=depends + IMAGE_VERSION=${{ steps.meta.outputs.version }} + GITHUB_PAT=${{ secrets.GITHUB_TOKEN }} + PECAN_VERSION=${{ steps.meta.outputs.version }} + PECAN_GIT_BRANCH= ${{ github.head_ref || github.ref_name }} + PECAN_GIT_CHECKSUM=${{ github.sha }} + PECAN_GIT_DATE=${{ github.event.repository.updated_at }} + +# ---------------------------------------------------------------------- +# models image has some python installed to run models, depends on base +# ---------------------------------------------------------------------- + models: + runs-on: ubuntu-latest + needs: base + permissions: + packages: write + + steps: + - uses: actions/checkout@v4 + + # create metadata for image + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + # list of Docker images to use as models name for tags + images: | + pecan/models + ghcr.io/${{ github.repository_owner }}/${{ github.repository }}/models + # generate Docker tags based on the following events/attributes + tags: | + type=schedule + type=ref,event=branch + type=ref,event=pr + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + type=semver,pattern={{major}} + + # setup docker build + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + id: buildx + uses: docker/setup-buildx-action@v3 + + - name: Inspect Builder run: | - echo "${INPUT_PASSWORD}" | docker login -u ${INPUT_USERNAME} --password-stdin ${INPUT_REGISTRY} - repo=$(echo ${{ github.repository_owner }} | tr 'A-Z' 'a-z') - for image in $(docker image ls pecan/*:github --format "{{ .Repository }}"); do - for v in ${PECAN_TAGS}; do - docker tag ${image}:github ${INPUT_REGISTRY}/${repo}/${image#pecan/}:${v} - docker push ${INPUT_REGISTRY}/${repo}/${image#pecan/}:${v} - done - done - docker logout - env: - INPUT_REGISTRY: ghcr.io - INPUT_USERNAME: ${{ secrets.GHCR_USERNAME }} - INPUT_PASSWORD: ${{ secrets.GHCR_PASSWORD }} - - # push all images to dockerhub - - name: Publish to DockerHub - if: github.event_name == 'push' && github.repository == env.MASTER_REPO + echo "Name: ${{ steps.buildx.outputs.name }}" + echo "Endpoint: ${{ steps.buildx.outputs.endpoint }}" + echo "Status: ${{ steps.buildx.outputs.status }}" + echo "Flags: ${{ steps.buildx.outputs.flags }}" + echo "Platforms: ${{ steps.buildx.outputs.platforms }}" + + # login to registries + - name: Login to DockerHub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + # build the docker images + - name: Build and push models + uses: docker/build-push-action@v6 + with: + context: docker/models + file: docker/models/Dockerfile + push: true + platforms: "linux/amd64" + cache-from: type=registry,ref=pecan/models:buildcache + cache-to: type=registry,ref=pecan/models:buildcache,mode=max + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + build-args: | + VERSION=${{ steps.meta.outputs.version }} + R_VERSION=${{ 
env.R_VERSION }} + FROM_IMAGE=depends + IMAGE_VERSION=${{ steps.meta.outputs.version }} + GITHUB_PAT=${{ secrets.GITHUB_TOKEN }} + PECAN_VERSION=${{ steps.meta.outputs.version }} + PECAN_GIT_BRANCH= ${{ github.head_ref || github.ref_name }} + PECAN_GIT_CHECKSUM=${{ github.sha }} + PECAN_GIT_DATE=${{ github.event.repository.updated_at }} + +# ---------------------------------------------------------------------- +# Next are images that have models installed +# ---------------------------------------------------------------------- + modelsbinary: + runs-on: ubuntu-latest + needs: models + permissions: + packages: write + strategy: + fail-fast: false + matrix: + name: + - basgra + - biocro + # - ed2_2.2.0 + - ed2_git + - maespa + - sipnet + include: + - name: basgra + CONTEXT: models/basgra + DOCKERFILE: models/basgra/Dockerfile + PLATFORM: "linux/amd64" + MODEL: basgra + VERSION: BASGRA_N_v1 + - name: biocro + CONTEXT: models/biocro + DOCKERFILE: models/biocro/Dockerfile + PLATFORM: "linux/amd64" + MODEL: biocro + VERSION: "0.95" + # - name: ed2_2.2.0 + # CONTEXT: models/ed + # DOCKERFILE: models/ed/Dockerfile + # PLATFORM: "linux/amd64" + # MODEL: ed2 + # VERSION: "2.2.0" + - name: ed2_git + CONTEXT: models/ed + DOCKERFILE: models/ed/Dockerfile + PLATFORM: "linux/amd64" + MODEL: ed2 + VERSION: "git" + - name: maespa + CONTEXT: models/maespa + DOCKERFILE: models/maespa/Dockerfile + PLATFORM: "linux/amd64" + MODEL: maespa + VERSION: "git" + - name: sipnet + CONTEXT: models/sipnet + DOCKERFILE: models/sipnet/Dockerfile + PLATFORM: "linux/amd64" + MODEL: sipnet + VERSION: "git" + + steps: + - uses: actions/checkout@v4 + + # lower case name for docker + - name: docker image name + id: lower + run: echo "image_name=$(echo model-${{ matrix.MODEL }}-${{ matrix.VERSION }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT + + # create metadata for image + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + # list of Docker images to use as base name for tags + images: | + pecan/${{ steps.lower.outputs.image_name }} + ghcr.io/${{ github.repository_owner }}/${{ github.repository }}/${{ steps.lower.outputs.image_name }} + # generate Docker tags based on the following events/attributes + tags: | + type=schedule + type=ref,event=branch + type=ref,event=pr + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + type=semver,pattern={{major}} + + # setup docker build + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + id: buildx + uses: docker/setup-buildx-action@v3 + + - name: Inspect Builder + run: | + echo "Name: ${{ steps.buildx.outputs.name }}" + echo "Endpoint: ${{ steps.buildx.outputs.endpoint }}" + echo "Status: ${{ steps.buildx.outputs.status }}" + echo "Flags: ${{ steps.buildx.outputs.flags }}" + echo "Platforms: ${{ steps.buildx.outputs.platforms }}" + + # login to registries + - name: Login to DockerHub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: docker image ls + run: docker image ls + + # build the docker images + - name: Build and push ${{ matrix.MODEL }} ${{ matrix.VERSION }} + uses: docker/build-push-action@v6 + with: + context: ${{ matrix.CONTEXT }} + file: ${{ matrix.DOCKERFILE }} + push: true + platforms: ${{ matrix.PLATFORM }} + 
cache-from: type=registry,ref=pecan/${{ steps.lower.outputs.image_name }}:buildcache + cache-to: type=registry,ref=pecan/${{ steps.lower.outputs.image_name }}:buildcache,mode=max + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + build-args: | + VERSION=${{ steps.meta.outputs.version }} + R_VERSION=${{ env.R_VERSION }} + MODEL_VERSION=${{ matrix.VERSION }} + IMAGE_VERSION=${{ steps.meta.outputs.version }} + + # ---------------------------------------------------------------------- + # Next are images that depend on base image + # ---------------------------------------------------------------------- + baseplus: + runs-on: ubuntu-latest + needs: base + permissions: + packages: write + strategy: + fail-fast: false + matrix: + name: + - docs + - executor + - api + include: + - name: docs + CONTEXT: . + DOCKERFILE: docker/docs/Dockerfile + PLATFORM: "linux/amd64" + IMAGE: docs + - name: executor + CONTEXT: docker/executor + DOCKERFILE: docker/executor/Dockerfile + PLATFORM: "linux/amd64" + IMAGE: executor + - name: api + CONTEXT: apps/api + DOCKERFILE: apps/api/Dockerfile + PLATFORM: "linux/amd64" + IMAGE: api + + steps: + - uses: actions/checkout@v4 + + # create metadata for image + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + # list of Docker images to use as base name for tags + images: | + pecan/${{ matrix.IMAGE }} + ghcr.io/${{ github.repository_owner }}/${{ github.repository }}/${{ matrix.IMAGE }} + # generate Docker tags based on the following events/attributes + tags: | + type=schedule + type=ref,event=branch + type=ref,event=pr + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + type=semver,pattern={{major}} + + # setup docker build + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + id: buildx + uses: docker/setup-buildx-action@v3 + + - name: Inspect Builder + run: | + echo "Name: ${{ steps.buildx.outputs.name }}" + echo "Endpoint: ${{ steps.buildx.outputs.endpoint }}" + echo "Status: ${{ steps.buildx.outputs.status }}" + echo "Flags: ${{ steps.buildx.outputs.flags }}" + echo "Platforms: ${{ steps.buildx.outputs.platforms }}" + + # login to registries + - name: Login to DockerHub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + # build the docker images + - name: Build and push ${{ matrix.name }} + uses: docker/build-push-action@v6 + with: + context: ${{ matrix.CONTEXT }} + file: ${{ matrix.DOCKERFILE }} + push: true + platforms: ${{ matrix.PLATFORM }} + cache-from: type=registry,ref=pecan/${{ matrix.IMAGE }}:buildcache + cache-to: type=registry,ref=pecan/${{ matrix.IMAGE }}:buildcache,mode=max + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + build-args: | + VERSION=${{ steps.meta.outputs.version }} + R_VERSION=${{ env.R_VERSION }} + IMAGE_VERSION=${{ steps.meta.outputs.version }} + + # ---------------------------------------------------------------------- + # Next are images that do not depend on either depends or base image + # ---------------------------------------------------------------------- + extras: + runs-on: ubuntu-latest + permissions: + packages: write + strategy: + fail-fast: false + matrix: + name: + - web + - dbsync + - data + - monitor + - 
rstudio-nginx + include: + - name: web + CONTEXT: . + DOCKERFILE: docker/web/Dockerfile + PLATFORM: "linux/amd64,linux/arm64" + IMAGE: web + - name: dbsync + CONTEXT: . + DOCKERFILE: shiny/dbsync/Dockerfile + PLATFORM: "linux/amd64" + IMAGE: shiny-dbsync + - name: data + CONTEXT: docker/data + DOCKERFILE: docker/data/Dockerfile + PLATFORM: "linux/amd64,linux/arm64" + IMAGE: data + - name: monitor + CONTEXT: docker/monitor + DOCKERFILE: docker/monitor/Dockerfile + PLATFORM: "linux/amd64,linux/arm64" + IMAGE: monitor + - name: rstudio-nginx + CONTEXT: docker/rstudio-nginx + DOCKERFILE: docker/rstudio-nginx/Dockerfile + PLATFORM: "linux/amd64,linux/arm64" + IMAGE: rstudio-nginx + + steps: + - uses: actions/checkout@v4 + + # create metadata for image + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + # list of Docker images to use as base name for tags + images: | + pecan/${{ matrix.IMAGE }} + ghcr.io/${{ github.repository_owner }}/${{ github.repository }}/${{ matrix.IMAGE }} + # generate Docker tags based on the following events/attributes + tags: | + type=schedule + type=ref,event=branch + type=ref,event=pr + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + type=semver,pattern={{major}} + + # setup docker build + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + id: buildx + uses: docker/setup-buildx-action@v3 + + - name: Inspect Builder run: | - echo "${INPUT_PASSWORD}" | docker login -u ${INPUT_USERNAME} --password-stdin - for image in $(docker image ls pecan/*:github --format "{{ .Repository }}"); do - for v in ${PECAN_TAGS}; do - docker tag ${image}:github ${{ env.DOCKERHUB_ORG }}/${image#pecan/}:${v} - docker push ${{ env.DOCKERHUB_ORG }}/${image#pecan/}:${v} - done - done - docker logout - env: - INPUT_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} - INPUT_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }} + echo "Name: ${{ steps.buildx.outputs.name }}" + echo "Endpoint: ${{ steps.buildx.outputs.endpoint }}" + echo "Status: ${{ steps.buildx.outputs.status }}" + echo "Flags: ${{ steps.buildx.outputs.flags }}" + echo "Platforms: ${{ steps.buildx.outputs.platforms }}" + + # login to registries + - name: Login to DockerHub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + # build the docker images + - name: Build and push ${{ matrix.name }} + uses: docker/build-push-action@v6 + with: + context: ${{ matrix.CONTEXT }} + file: ${{ matrix.DOCKERFILE }} + push: true + platforms: ${{ matrix.PLATFORM }} + cache-from: type=registry,ref=pecan/${{ matrix.IMAGE }}:buildcache + cache-to: type=registry,ref=pecan/${{ matrix.IMAGE }}:buildcache,mode=max + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + build-args: | + VERSION=${{ steps.meta.outputs.version }} + R_VERSION=${{ env.R_VERSION }} diff --git a/.github/workflows/download-met-data.yml b/.github/workflows/download-met-data.yml new file mode 100644 index 00000000000..1871176e0c1 --- /dev/null +++ b/.github/workflows/download-met-data.yml @@ -0,0 +1,57 @@ +name : Test Data Download +on : + # allow manual triggering + workflow_dispatch: + + schedule: + # run Thursday 4:30 AM UTC + - cron: '30 4 * * 4' + +env: + R_LIBS_USER: /usr/local/lib/R/site-library + LC_ALL: en_US.UTF-8 
+  NCPUS: 2
+  PGHOST: postgres
+  CI: true
+
+jobs:
+  met-data-download:
+    runs-on: ubuntu-latest
+    env:
+      GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }}
+
+    services:
+      postgres:
+        image: mdillon/postgis:9.5
+        options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5
+
+    container:
+      image: pecan/depends:R4.1
+
+    steps:
+      # checkout source code
+      - name: work around https://github.com/actions/checkout/issues/766
+        run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
+      - uses: actions/checkout@v4
+        with:
+          set-safe-directory: false
+
+      # install additional tools needed
+      - name: install utils
+        run: apt-get update && apt-get install -y postgresql-client qpdf
+      - name: install new dependencies
+        run: Rscript scripts/generate_dependencies.R && cd docker/depends && Rscript pecan.depends.R
+
+      # initialize database
+      - name: db setup
+        uses: docker://pecan/db:ci
+      - name: add models to db
+        run: ./scripts/add.models.sh
+
+      # compile PEcAn code
+      - name: build
+        run: make -j1
+
+      - name: CRUNCEP
+        run: |
+          Rscript ./tests/test_met_downloads.R --settings ./tests/met_download_settings/docker.CRUNCEP.xml
diff --git a/.github/workflows/integration-test.yml b/.github/workflows/integration-test.yml
new file mode 100644
index 00000000000..6da48ba68cf
--- /dev/null
+++ b/.github/workflows/integration-test.yml
@@ -0,0 +1,31 @@
+name: Integration Tests
+on:
+  # allow manual triggering
+  workflow_dispatch:
+
+  schedule:
+    # run Thursday 4:30 AM UTC
+    - cron: '30 4 * * 4'
+jobs:
+  test:
+    runs-on: ubuntu-20.04
+
+    env:
+      GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }}
+
+    container:
+      image: pecan/base:develop
+
+    steps:
+      - name: Checkout source code
+        uses: actions/checkout@v4
+
+      - name: Run tests
+        run: |
+          for FILE in modules/data.atmosphere/inst/integrationTests/*; do
+            if echo "$FILE" | grep -q "ERA5"; then
+              echo "Skipping file: $FILE"
+            else
+              Rscript "$FILE"
+            fi
+          done
diff --git a/.github/workflows/prlabeler.yml b/.github/workflows/prlabeler.yml
new file mode 100644
index 00000000000..921b6fd4512
--- /dev/null
+++ b/.github/workflows/prlabeler.yml
@@ -0,0 +1,22 @@
+# This workflow is based on the official GitHub labeler action (actions/labeler).
+# It is triggered by pull request events (for PRs from both forks and branches inside the repo).
+# Labels are applied based on the file paths modified in the PR.
+# It uses a glob-based labeling config file (.github/labeler.yml) to make labeling decisions.
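+#
+# For example, this rule from .github/labeler.yml applies the 'Dockerfile'
+# label to any PR that modifies files under the docker directory:
+#
+#   'Dockerfile':
+#   - changed-files:
+#     - any-glob-to-any-file: 'docker/**'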
+ +name: "PR Labeler" +on: + - pull_request_target +jobs: + label: + permissions: + contents: read + pull-requests: write + runs-on: ubuntu-latest + + steps: + - uses: actions/labeler@v5 + with: + repo-token: "${{ secrets.GITHUB_TOKEN }}" + configuration-path: ".github/labeler.yml" + sync-labels: false + dot: true diff --git a/.github/workflows/sipnet.yml b/.github/workflows/sipnet.yml new file mode 100644 index 00000000000..b1b3eeca8d6 --- /dev/null +++ b/.github/workflows/sipnet.yml @@ -0,0 +1,69 @@ +name: sipnet + +on: + workflow_call: + inputs: + R-version: + required: true + type: string + +env: + R_LIBS_USER: /usr/local/lib/R/site-library + LC_ALL: en_US.UTF-8 + NCPUS: 2 + PGHOST: postgres + CI: true + +jobs: + sipnet: + runs-on: ubuntu-latest + env: + GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} + + services: + postgres: + image: mdillon/postgis:9.5 + options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5 + + container: + image: pecan/depends:R${{ inputs.R-version }} + + steps: + # checkout source code + - name: work around https://github.com/actions/checkout/issues/766 + run: git config --global --add safe.directory "$GITHUB_WORKSPACE" + - uses: actions/checkout@v4 + with: + set-safe-directory: false + + # install additional tools needed + - name: install utils + run: apt-get update && apt-get install -y postgresql-client qpdf + - name: install new dependencies + run: Rscript scripts/generate_dependencies.R && cd docker/depends && Rscript pecan.depends.R + + # initialize database + - name: db setup + uses: docker://pecan/db:ci + - name: add models to db + run: ./scripts/add.models.sh + + # install sipnet + - name: Check out SIPNET + uses: actions/checkout@v4 + with: + repository: PecanProject/sipnet + path: sipnet + set-safe-directory: false + - name: install sipnet + run: | + cd ${GITHUB_WORKSPACE}/sipnet + make + + # compile PEcAn code + - name: build + run: make -j1 + + # run SIPNET test + - name: integration test + run: ./tests/integration.sh ghaction diff --git a/.github/workflows/styler-actions.yml b/.github/workflows/styler-actions.yml index 9821d3f3da9..51d2e47e839 100644 --- a/.github/workflows/styler-actions.yml +++ b/.github/workflows/styler-actions.yml @@ -14,13 +14,13 @@ jobs: run: echo '${{ steps.file_changes.outputs.files_modified }}' - name: work around https://github.com/actions/checkout/issues/766 run: git config --global --add safe.directory "$GITHUB_WORKSPACE" - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: set-safe-directory: false - - uses: r-lib/actions/pr-fetch@master + - uses: r-lib/actions/pr-fetch@v2 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - - uses: r-lib/actions/setup-r@master + - uses: r-lib/actions/setup-r@v2 - name: Install styler run: | Rscript -e 'install.packages("styler")' @@ -39,7 +39,7 @@ jobs: git add \*.R git add \*.Rmd if [ "$(git diff --name-only --cached)" != "" ]; then git commit -m 'automated syle update' ; fi - - uses: r-lib/actions/pr-push@master + - uses: r-lib/actions/pr-push@v2 with: repo-token: ${{ secrets.GITHUB_TOKEN }} @@ -51,7 +51,7 @@ jobs: steps: - name: work around https://github.com/actions/checkout/issues/766 run: git config --global --add safe.directory "$GITHUB_WORKSPACE" - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: set-safe-directory: false - uses: r-lib/actions/pr-fetch@v1 @@ -62,7 +62,8 @@ jobs: - name: install any new dependencies env: GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} - run: Rscript docker/depends/pecan.depends.R + working-directory: 
docker/depends + run: Rscript pecan.depends.R - id: file_changes uses: trilom/file-changes-action@v1.2.4 - name : make @@ -82,7 +83,7 @@ jobs: run: | git config --global user.email "pecan_bot@example.com" git config --global user.name "PEcAn stylebot" - git add \*.Rd \*NAMESPACE Makefile.depends docker/depends/pecan.depends.R + git add \*.Rd \*NAMESPACE Makefile.depends docker/depends/pecan_package_dependencies.R docker/depends/pecan_deps_from_github.txt if [ "$(git diff --name-only --cached)" != "" ]; then git commit -m 'automated documentation update' ; fi - uses: r-lib/actions/pr-push@master with: diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 00000000000..dc92869d6f4 --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,61 @@ +name: test + +on: + workflow_call: + inputs: + R-version: + required: true + type: string + +env: + R_LIBS_USER: /usr/local/lib/R/site-library + LC_ALL: en_US.UTF-8 + NCPUS: 2 + PGHOST: postgres + CI: true + +jobs: + test: + runs-on: ubuntu-latest + + env: + GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} + + services: + postgres: + image: mdillon/postgis:9.5 + options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5 + + container: + image: pecan/depends:R${{ inputs.R-version }} + + steps: + # checkout source code + - name: work around https://github.com/actions/checkout/issues/766 + run: git config --global --add safe.directory "$GITHUB_WORKSPACE" + - uses: actions/checkout@v4 + with: + set-safe-directory: false + + # install additional tools needed + - name: install utils + run: apt-get update && apt-get install -y postgresql-client qpdf + - name: update dependency lists + run: Rscript scripts/generate_dependencies.R + - name: check for out-of-date dependencies files + uses: infotroph/tree-is-clean@v1 + - name: install newly-added dependencies + working-directory: docker/depends + run: Rscript pecan.depends.R + + # initialize database + - name: db setup + uses: docker://pecan/db:ci + - name: add models to db + run: ./scripts/add.models.sh + + # run PEcAn tests + - name: test + run: make -j1 test + - name: check for out-of-date files + uses: infotroph/tree-is-clean@v1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 5edf0af2973..0cbd0976ae5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,19 +7,27 @@ For more information about this file see also [Keep a Changelog](http://keepacha ## [Unreleased] -Some changes have been made to the docker-compose file. Check your .env file to -see if you need to change any of these: -- TRAEFIK_HOST is now TRAEFIK_HOSTNAME without the Host: and ;, for example if you have - `TRAEFIK_HOST=Host:pecan.example.com;` you will need to change this to - `TRAEFIK_HOST=pecan.example.com`. -- TRAEFIK_IPFILTER is no longer used, and should be removed. -- TRAEFIK_HTTP_PORT now defaults to port 80 -- TRAEFIK_HTTPS_PORT now defaults to port 443 -- TRAEFIK_ACME_ENABLE is no longer used. If you want to use https you will need to add - `docker-compose.https.yml` to your command line. You can use the script `compose.sh` - to star PEcAn with https. -- TRAEFIK_HTTPS_OPTIONS is no longer used, this is the default when you use https. -- TRAEFIK_HTTP_REDIRECT is no longer used, this is the default when you use https. +### Added + +### Fixed +- updated github action to build docker images + +### Changed + +- The following components have changed their licensing. 
With approval of all their contributors, we now provide them under a BSD 3-clause license rather than the previously used NCSA Open Source license. As a reminder, we intend to relicense the entire system and this list will expand as we gather permission from the relevant copyright owners.
+  * `apps/api`
+  * Shiny apps `dbsync`, `BenchmarkReport`, `Data-Ingest`, `Elicitation`, `ForecastingDashboard`, `global-sensitivity`, `Pecan.depend`, `SDAdashboard`, and `ViewMet`
+  * Base packages `PEcAn.all`, `PEcAn.DB`, `PEcAn.QAQC`, `PEcAn.remote`, `PEcAn.settings`, `PEcAn.visualization`, and `PEcAn.workflow`
+  * Model packages `PEcAn.BASGRA`, `PEcAn.CLM45`, `PEcAn.DALEC`, `PEcAn.dvmdostem`, `PEcAn.FATES`, `PEcAn.GDAY`, `PEcAn.JULES`, `PEcAn.LDNDC`, `PEcAn.LINKAGES`, `PEcAn.LPJGUESS`, `PEcAn.MAAT`, `PEcAn.MAESPA`, `PEcAn.PRELES`, `PEcAn.SIBCASA`, `PEcAn.SIPNET`, `PEcAn.STICS`, and the new model package template.
+  * Modules `PEcAn.allometry`, `PEcAn.assim.batch`, `PEcAn.data.mining`, `PEcAn.emulator`, `PEcAn.MA`, `PEcAn.photosynthesis`, `PEcAn.priors`, and `PEcAn.RTM`.
+- Renamed master branch to main
+- `PEcAn.all::pecan_version()` now reports commit hashes as well as version numbers for each installed package.
+
+### Removed
+
+- Removed Browndog support for conversions (#3348, @Sweetdevil144).
+
+## [1.8.0] - 2024-07-12

 ### Added
 - Created a new soilgrids function to extract the mean soil organic carbon profile with associated uncertainty values at each depth for any lat/long location (#3040). Function was created for the CMS SDA workflow.
@@ -31,16 +39,21 @@
 - Added a new function `unit_is_parseable` in PEcAn.utils to replace `udunits2::ud.is.parseable`. (#3002; @nanu1605)
 - Initial LDNDC model coupling
-- `PEcAn.settings::read.settings()` now strips comments so HTML style comments (e.g. ``) are now allowed in pecan.xml files
-- `PEcAn.logger::setLevel()` now invisibly returns the previously set logger level
-- Warning messages for `model2netcdf.ed2()` coming from `ncdf4::ncvar_put()` now are prepended with the variable name for easier debugging (#3078)
 - Added optional `process_partial` argument to `model2netcdf.ED2()` to allow it to process existing output from failed runs.
-- Added litter_mass_content_of_water to standard_vars table
-- Added litter_mass_content_of_water to model2netcdf.SIPNET
+- Added litter_mass_content_of_water to standard_vars table and model2netcdf.SIPNET
 - Added all SIPNET state variables to read_restart and write_restart
 - Added Observation preparation functions into the SDA workflow, which support AGB, LAI, soil carbon, and soil moisture.
-- We are slowly change the license from NCSA opensource to BSD-3 to help with publishing PEcAn to CRAN.
 - Added an optional `pfts` argument to `PEcAn.uncertainty::run.sensitivity.analysis()` so that sensitivity analysis and variance decomposition can be run on a subset of PFTs defined in `settings` if desired (#3155).
+- Added new features to the SDA function, including: 1) a user-defined free-run mode;
+2) a user-defined parallel mode for qsub submission; and 3) a user-defined email option for reporting progress.
+- `PEcAnAssimSequential::GET.MultiSite()` now supports parallelization of multi-chain MCMC sampling with a fully randomized inits function.
+- Added a new block-based SDA workflow that supports parallel computation.
+- Added a new SDA workflow for the 342 North America anchor sites.
+- Added preparation of initial conditions for MODIS LAI, AGB, ISCN SOC, and soil moisture across the NA anchor sites.
+- Added a GEDI AGB preparation workflow.
+- Added support for downloading datasets from the NASA DAAC ORNL database.
+- Extended the downscale function and created 'downscale_hrly' so that it handles more frequent data.
+- Added 'aggregate' as a new feature for downscaled data.

 ### Fixed

@@ -67,28 +80,50 @@
 convert data for a single PFT fixed (#1329, #2974, #2981)
 - `PEcAn.data.land::gSSURGO.Query` has been updated to work again after changes to the gSSURGO API.
 - `PEcAn.settings::read.settings()` now prints a warning when falling back on default `"pecan.xml"` if the named `inputfile` doesn't exist.
 - fqdn() can access hostname on Windows (#3044 fixed by #3058)
-- The model2netcdf_SIPNET function can now export full year nc files by using
-  the cdo_setup argument in the template job file. In detail, people will need
-  to specify cdosetup = "module load cdo/2.0.6" in the host section. More details
-  are in the Create_Multi_settings.R script. (#3052)
 - write.config.xml.ED2() wasn't using the tag in settings correctly (#3080)
 - runModule.get.trait.data() now correctly respects the settings$database$bety$write setting (#2968)
 - Fixed a bug in `model2netcdf.ed2()` where .nc file connections were being closed multiple times, printing warnings (#3078)
 - Fixed a bug causing the model2netcdf.ED2() step in jobs.sh to be incorrectly written (#3075)
 - Fixed a bug where `plant_min_temp` trait value wasn't being converted from °C to K when writing config file for ED2 (#3110)
 - Fixed a bug in `PEcAn.ED2::read_E_files()` affecting `PEcAn.ED2::model2netcdf.ED2()` that resulted in incorrect calculations (#3126)
-- DDBH (change in DBH over time) is no longer extracted and summarized from monthly -E- files by `PEcAn.ED2::model2netcdf.ED2()`. We are not sure it makes sense to summarize this variable across cohorts of different sizes.
-- The `yr` and `yfiles` arguments of `PEcAn.ED2::read_E_files()` are no longer used and the simulation date is extracted from the names of the .h5 files output by ED2.
-- Update Dockerfile for sipnet/maespa/template to use pecan/models:tag to build.
+- Fixed a bug in `PEcAn.utils::ud_convert()` where it failed with objects of class "difftime", introduced by refactoring to use the `units` package instead of `udunits` (#3012)
+- Corrected the propagation of aqq and bqq for the SINGLE Q type.
+- Fixed an issue where indirect constraints would increase with the size of the covariance matrix.
+- Updated URL for MERRA downloads (#2888)

 ### Changed

-- Using R4.0 and R4.1 tags to build PEcAn. Default is now 4.1
+- The default build of PEcAn now uses R 4.1. PEcAn is also tested daily on R 4.2 and weekly on R-devel. R 4.0 and older are no longer tested and will probably not work.
+- Some changes have been made to the docker-compose file. Check your .env file to
+  see if you need to change any of these:
+  - TRAEFIK_HOST is now TRAEFIK_HOSTNAME without the `Host:` and `;`, for example if you have
+    `TRAEFIK_HOST=Host:pecan.example.com;` you will need to change this to
+    `TRAEFIK_HOSTNAME=pecan.example.com`.
+  - TRAEFIK_IPFILTER is no longer used, and should be removed.
+  - TRAEFIK_HTTP_PORT now defaults to port 80
+  - TRAEFIK_HTTPS_PORT now defaults to port 443
+  - TRAEFIK_ACME_ENABLE is no longer used. If you want to use https you will need to add
+    `docker-compose.https.yml` to your command line. You can use the script `compose.sh`
+    to start PEcAn with https.
+ - TRAEFIK_HTTPS_OPTIONS is no longer used; this behavior is the default when you use https. + - TRAEFIK_HTTP_REDIRECT is no longer used; this behavior is the default when you use https. +- Updated Dockerfile for sipnet/maespa/template to use pecan/models:tag to build. +- The `yr` and `yfiles` arguments of `PEcAn.ED2::read_E_files()` are no longer used + and the simulation date is extracted from the names of the .h5 files output by ED2. +- DDBH (change in DBH over time) is no longer extracted and summarized from + monthly -E- files by `PEcAn.ED2::model2netcdf.ED2()`. We are not sure it makes + sense to summarize this variable across cohorts of different sizes. +- `PEcAn.SIPNET::model2netcdf.SIPNET` can now export full-year nc files by using + the cdo_setup argument in the template job file. Specifically, you will need + to specify `cdosetup = "module load cdo/2.0.6"` in the host section. More details + are in the Create_Multi_settings.R script. (#3052) +- `PEcAn.settings::read.settings()` now strips comments, so HTML-style comments (e.g. `<!-- comment -->`) are allowed in pecan.xml files +- `PEcAn.logger::setLevel()` now invisibly returns the previously set logger level +- Warning messages for `model2netcdf.ed2()` coming from `ncdf4::ncvar_put()` are now prepended with the variable name for easier debugging (#3078) - Database connections consistently use `DBI::dbConnect` instead of the deprecated `dplyr::src_postgres` (#2881). This change should be invisible to most users, but it involved converting a lot of internal variables from `bety$con` to `con`. If you see errors involving these symbols it means we missed a place, so please report them as bugs. - `PEcAn.utils::download.url` argument `retry404` is now renamed to `retry` and now functions as intended (it was being ignored completely before). -- Update URL for MERRA downloads (#2888) -- PEcAn.logger is now BSD-3 License +- We have begun the process of relicensing the PEcAn packages from the previous NCSA license to BSD-3, with the consent of all contributors. PEcAn.logger is now distributed as BSD-3; others will be changed as their authors sign off. - Skipped ICOS and MERRA download tests when running in github actions - Converted .zenodo.json to CITATION.cff - Using traefik 2.5 instead of 1.7 @@ -101,6 +136,7 @@ convert data for a single PFT fixed (#1329, #2974, #2981) - Internal changes to keep up to date with tidyselect v1.2.0 - The `PEcAn.utils::download.file()` function has now been renamed to `PEcAn.utils::download_file()` - The `regrid()` and `grid2netcdf()` functions from `PEcAn.utils` have been moved to the `PEcAn.data.remote` package. +- The web interface is now built using PHP 8 since PHP 7 is EOL. ### Removed diff --git a/CITATION.cff b/CITATION.cff index ccdc9dc297c..7af92146298 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -2,7 +2,7 @@ cff-version: 1.2.0 title: >- The Predictive Ecosystem Analyzer (PEcAn) is an integrated ecological bioinformatics toolbox. -version: 1.7.2 +version: 1.8.0 abstract: >- The Predictive Ecosystem Analyzer (PEcAn) (see pecanproject.org) is an integrated ecological @@ -77,7 +77,7 @@ authors: - affiliation: University of Cambridge given-names: Anne Thomas orcid: 'https://orcid.org/0000-0002-2808-6462' - - affiliation: CK Black Science Consulting + - affiliation: Pools and Fluxes LLC given-names: Chris Black orcid: 'https://orcid.org/0000-0001-8382-298X' - affiliation: Rutgers University @@ -120,7 +120,9 @@ authors: - given-names: Eric R.
Scott affiliation: University of Arizona orcid: 'https://orcid.org/0000-0002-7430-7879' - + - given-names: Harunobu Ishii + affiliation: Boston University Software & Application Innovation Lab (SAIL) + preferred-citation: type: article title: Facilitating feedbacks between field measurements and ecosystem models diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..776a2fd15e4 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,45 @@ +# Contributor Covenant Code of Conduct + +**Our Pledge** + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. + +**Our Standards** + +Examples of behavior that contributes to creating a positive environment include: + + * Using welcoming and inclusive language + * Being respectful of differing viewpoints and experiences + * Gracefully accepting constructive criticism + * Focusing on what is best for the community + * Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + + + +**Our Responsibilities** + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +**Scope** + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. + +**Enforcement** + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at pecanproj[at]gmail.com. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+ +**Attribution** + +This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org/) version 1.4, available at [http://contributor-covenant.org/version/1/4](http://contributor-covenant.org/version/1/4/). diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 17c16a68581..1ec2e870917 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,24 +1,24 @@ # How to contribute -Third-party contributions are highly encouraged for PEcAn and will grow the code as well as the understanding of PEcAn and its applications. The core development team can not add all models that exist to PEcAn or all possible scenarios and analysis that people want to conduct. Our goal is to keep it as easy as possible for you contribute changes that get things working in your environment. +Third-party contributions are highly encouraged for PEcAn and will grow the code as well as the understanding of PEcAn and its applications. The core development team cannot add all models that exist to PEcAn, or support all possible scenarios and analyses that people want to conduct. Our goal is to keep it as easy as possible for you to contribute changes that get things working in your environment. There are a few guidelines that we need contributors to follow so that we can have a chance of keeping on top of things. ## PEcAn CORE vs Models vs Modules New functionality is typically directed toward modules to provide a slimmer PEcAn Core, reducing the requirements to get PEcAn running on different platforms, especially HPC machines, and to allow greater freedom for modules and models. -Generally, new model should be added to the models folder and new modules should be added to the modules folder. +Generally, new models should be added to the models folder and new modules should be added to the modules folder. Exceptions include code that is reused in many models or modules and wrapper functions that call specific implementations in models; these can be placed in the core packages. -If you are unsure of whether your contribution should be implemented as a model, module or part of PEcAn Core, you may visit [Chat Room](https://join.slack.com/t/pecanproject/shared_invite/enQtMzkyODUyMjQyNTgzLWEzOTM1ZjhmYWUxNzYwYzkxMWVlODAyZWQwYjliYzA0MDA0MjE4YmMyOTFhMjYyMjYzN2FjODE4N2Y4YWFhZmQ) or ask on the pecan-develop mailing list for advice. +If you are unsure of whether your contribution should be implemented as a model, module or part of PEcAn Core, you may join our [Slack Channel](https://join.slack.com/t/pecanproject/shared_invite/enQtMzkyODUyMjQyNTgzLWEzOTM1ZjhmYWUxNzYwYzkxMWVlODAyZWQwYjliYzA0MDA0MjE4YmMyOTFhMjYyMjYzN2FjODE4N2Y4YWFhZmQ). ## Creating Issues - Make sure you have a GitHub account. - Search GitHub and Google to see if your issue has already been reported - - Create an issue in GitHub, assuming one does not already exist. - - Clearly describe the issue including steps to reproduce when it is a bug. - - Make sure you fill in the earliest version that you know has the issue. - Ask @dlebauer, @mdietze or @robkooper to add you to the PEcAn project if you plan on fixing the issue. ## Getting Started @@ -33,30 +33,33 @@ At this point you will have a copy of PEcAn and you are almost ready to work on At this point you will have a copy of the pecan repo in your personal space.
Next steps are to set up your local copy to work with the forked version. Introduce yourself to Git (if you have not done this yet); make sure you use an email associated with your GitHub account. + ```bash git config --global user.name "John Doe" git config --global user.email johndoe@example.com ``` Switch pecan to your fork + ```bash git remote set-url origin https://github.com/<username>/pecan.git ``` -Setup pecan to be able to fetch from the master/develop +Set up pecan to be able to fetch from the main/develop + ```bash git remote add upstream https://github.com/PecanProject/pecan.git ``` ## PEcAn Branches -PEcAn uses two protected branches, the master branch and the develop branch. The master branch will match the official releases, but all work will be done on the develop branch. Make sure that you create branches from the develop branch. This should be the default branch in your git repository. +PEcAn uses two protected branches, the main branch and the develop branch. The main branch will match the official releases, but all work will be done on the develop branch. Make sure that you create branches from the develop branch. This should be the default branch in your git repository. ## Adding Features When you add a new feature always create an issue first; this allows others to comment and give you tips. It will also help us keep track of what people are adding, and with new releases it helps us to write release notes and give you credit for your work. -Secondly always work in a branch, never work on the master or develop branch. Keep your master and develop branch in sync with the master and develop of the official PEcAn repository. This makes the pull requests (you do want your work to be in the main branch right?) easier for us. +Secondly, always work in a branch; never work on the main or develop branch. Keep your main and develop branches in sync with the main and develop of the official PEcAn repository. This makes the pull requests (you do want your work to be in the main branch, right?) easier for us. Finally, try to keep your branches focused on fixing/adding only one feature and try not to fall into the trap of doing a lot of things in a single branch. This will not only make it harder for us to process your pull request, but it also makes it take longer before you can submit your pull request. Small pull requests are more likely to be looked at faster and pulled into the develop branch faster. @@ -66,40 +69,54 @@ Here is a simplified workflow on how add a new feature: Update your develop (both locally and on GitHub) -``` +```bash git fetch upstream git checkout develop git merge upstream/develop git push ``` -### Create a branch to do your work. +### Create a branch to do your work -A good practice is to call the branch in the form of GH- followed by the title of the issue. This makes it easier to find out the issue you are trying to solve and helps us to understand what is done in the branch. Calling a branch my-work is confusing. Names of branch can not have a space, and should be replaced with a hyphen. +A good practice is to name the branch `GH-` followed by the issue number and title. This makes it easier to find the issue you are trying to solve and helps us understand what is done in the branch. Calling a branch my-work is confusing. Branch names cannot contain spaces; use hyphens instead. -``` +```bash git checkout -b GH-issuenumber-title-of-issue ``` ### Work and commit -Do you work, and commit as you see fit.Make your commit messages helpful.
+Do your work, and commit as you see fit. Make your commit messages helpful. + +### Update other files (CITATION, NEWS, CHANGELOG) -### Push your changes up to GitHub. +Your PR should include: + +- CITATION.cff: if you are making or have made a non-trivial contribution (please ask if unsure; our approach is inclusive), add your name to the author section. +- NEWS.md: add an entry for each package you changed +- CHANGELOG.md: add changes to [Unreleased] section + + +### Push your changes up to GitHub If this is the first time pushing to GitHub you will need the extended command below; otherwise you can simply do a `git push`. -``` +```bash git push -u origin GH-issuenumber-title-of-issue ``` + ### Pull Request When finished create a pull request from your branch to the main pecan repository. + When submitting a pull request, you retain authorship of the code you contribute. However, you are giving the PEcAn Project permission to distribute your contributions under either or both, at our discretion, of: + - The license listed at PR opening time for the code you are contributing to, + - and/or the BSD 3-clause license. + ## Additional Resources -- [Adding models to PEcAn](https://pecanproject.github.io/pecan-documentation/master/adding-an-ecosystem-model.html) -- [PEcAn configuration files](https://pecanproject.github.io/pecan-documentation/master/pecan-xml-configuration.html) -- [Development help](https://pecanproject.github.io/pecan-documentation/master/developer-guide.html) -- [PEcAn Code of Conduct](https://pecanproject.github.io/pecan-documentation/master/contributor-covenant-code-of-conduct.html) +- [Adding models to PEcAn](https://pecanproject.github.io/pecan-documentation/latest/adding-an-ecosystem-model.html) +- [PEcAn configuration files](https://pecanproject.github.io/pecan-documentation/latest/pecan-xml-configuration.html) +- [Development help](https://pecanproject.github.io/pecan-documentation/latest/developer-guide.html) +- [PEcAn Code of Conduct](CODE_OF_CONDUCT.md) diff --git a/DEBUGING.md b/DEBUGING.md index b62f71558d6..5c9c7c87aa6 100644 --- a/DEBUGING.md +++ b/DEBUGING.md @@ -1,3 +1,5 @@ +# DEBUGGING.MD + Adding the following to the workflow.R or to your .Rprofile will enable printing of a stacktrace in case something goes wrong. This will help with the development of PEcAn. diff --git a/DEV-INTRO.md b/DEV-INTRO.md index f865c36c2cc..59bde74b01a 100644 --- a/DEV-INTRO.md +++ b/DEV-INTRO.md @@ -1,14 +1,14 @@ # PEcAn Development -This is a minimal guide to getting started with PEcAn development under Docker. You can find more information about docker in the [pecan documentation](https://pecanproject.github.io/pecan-documentation/master/docker-index.html). +This is a minimal guide to getting started with PEcAn development under Docker. You can find more information about docker in the [pecan documentation](https://pecanproject.github.io/pecan-documentation/latest/docker-index.html). ## Requirements and Recommendations -Docker is the primary software requirement; it handles all of the other software dependencies. This has been tested on Ubuntu 18.04 and above, MacOS Catalina, and Windows 10 with Windows Subsystem for Linux 2. +Docker is the primary software requirement; it handles all of the other software dependencies. This has been tested on Ubuntu 18.04 and above, MacOS Sonoma, and Windows 10 with Windows Subsystem for Linux 2 (following the Linux instructions). You can check which versions you already have installed with the quick check shown below, before working through the list that follows.
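+A quick, optional way to check the tools you already have (exact output format varies by platform; this is only a convenience, not part of the required setup):
+
+```sh
+# Print installed versions of the tools listed below.
+docker --version
+docker compose version
+git --version
+```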
- Software (installation instructions below): - - Docker version 19 - - Docker-compose version 1.26 + - Docker version 26 + - Docker Compose version 2.27 - Git (optional until you want to make major changes) - Hardware - 100 GB storage (minimum 50 GB) @@ -29,118 +29,99 @@ cd pecan ## Developing in Docker -The use of Docker in PEcAn is described in detail in the [PEcAn documentation](https://pecanproject.github.io/pecan-documentation/master/docker-index.html). This is intended as a quick start. +The use of Docker in PEcAn is described in detail in the [PEcAn documentation](https://pecanproject.github.io/pecan-documentation/latest/docker-index.html). This is intended as a quick start. ### Installing Docker -To install Docker and docker-compose, see the docker documentation: -- Docker Desktop in [Mac OSX](https://docs.docker.com/docker-for-mac/install/) or [Windows](https://docs.docker.com/docker-for-windows/install/) -- Docker (e.g. [Ubuntu](https://docs.docker.com/compose/install/)) and [docker-compose](https://docs.docker.com/compose/install/) on your linux operating system. +To install Docker and Docker Compose, see the docker documentation: + +- Docker Desktop (includes Docker Compose) in [MacOS](https://docs.docker.com/desktop/install/mac-install/) or [Windows](https://docs.docker.com/desktop/install/windows-install/) or [Linux](https://docs.docker.com/desktop/install/linux-install/) +- On Linux, you can also choose to separately install the [Docker engine](https://docs.docker.com/engine/install/) and [Docker Compose](https://docs.docker.com/compose/install/). + +_Note for Linux (including Windows WSL2) users:_ add your user to the docker group. This will prevent you from having to use `sudo` to start the docker containers, and makes sure that any file that is written to a mounted volume is owned by you. This can be done using -_Note for Linux users:_ add your user to the docker group. This will prevent you from having to use `sudo` to start the docker containers, and makes sure that any file that is written to a mounted volume is owned by you. This can be done using ```sh # for linux users -sudo adduser ${USER} docker`. +sudo adduser ${USER} docker ``` ### Deploying PEcAn in Docker To get started with development in docker we need to bring up the docker stack first. In the main pecan folder you will find the [docker-compose.yml](docker-compose.yml) file that can be used to bring up the pecan stack. There is also the [docker-compose.dev.yaml](docker-compose.dev.yaml) file that adds additional containers, and changes some services to make it easier for development. -By default docker-compose will use the files `docker-compose.yml` and `docker-compose.override.yml`. We will use the default `docker-compose.yml` file from PEcAn. The `docker-compose.override.yml` file can be used to configure it for your specific environment, in our case we will use it to setup the docker environment for development. Copy the `docker-compose.dev.yml` file to `docker-compose.override.yml` to start working with your own override file, i.e. : +By default Compose will use the files `docker-compose.yml` and `docker-compose.override.yml`. We will use the default `docker-compose.yml` file from PEcAn. The `docker-compose.override.yml` file can be used to configure it for your specific environment; in our case we will use it to set up the docker environment for development. Copy the `docker-compose.dev.yml` file to `docker-compose.override.yml` to start working with your own override file, i.e.:
For Linux/MacOSX -``` +```sh cp docker-compose.dev.yml docker-compose.override.yml ``` -For Windows - -``` -copy docker-compose.dev.yml docker-compose.override.yml -``` - -You can now use the command `docker-compose` to work with the containers setup for development. **The rest of this document assumes you have done this step.** +You can now use the command `docker compose` to work with the containers set up for development. **The rest of this document assumes you have done this step.** ### First time setup -The steps in this section only need to be done the first time you start working with the stack in docker. After this is done you can skip these steps. You can find more detail about the docker commands in the [pecan documentation](https://pecanproject.github.io/pecan-documentation/master/docker-index.html). +The steps in this section only need to be done the first time you start working with the stack in docker. After this is done you can skip these steps. You can find more detail about the docker commands in the [pecan documentation](https://pecanproject.github.io/pecan-documentation/latest/docker-index.html). -* setup .env file -* create folders to hold the data -* load the postgresql database -* load some test data -* copy all R packages (optional but recommended) -* setup for web folder development (optional) +- setup .env file +- create folders to hold the data +- load the postgresql database +- load some test data +- copy all R packages (optional but recommended) +- setup for web folder development (optional) #### .env file -You can copy the [`docker/env.example`](docker/env.example) file as .env in your pecan folder. The variables we want to modify are: - -For Linux/MacOSX +You can copy the [`docker/env.example`](docker/env.example) file as .env in your pecan folder. ```sh cp docker/env.example .env ``` -For Windows - -``` -copy docker/env.example .env -``` +The variables we want to modify are: -* `COMPOSE_PROJECT_NAME` set this to pecan, the prefix for all containers -* `PECAN_VERSION` set this to develop, the docker image we start with +- `COMPOSE_PROJECT_NAME`, the prefix for all containers. Set this to "pecan". +- `PECAN_VERSION`, the docker image we start with. Set this to "develop". -Both of these variables should also be uncommented by removing the # preceding them. At the end you should see the following if you run the following command `egrep -v '^(#|$)' .env`. If you have a windows system, you will need to set the variable PWD as well, and for linux you will need to set UID and GID (for rstudio). +Both of these variables should also be uncommented by removing the # preceding them. -For Linux +At the end you should see the following if you run the command `egrep -v '^(#|$)' .env`: -``` -echo "COMPOSE_PROJECT_NAME=pecan" >> .env -echo "PECAN_VERSION=develop" >> .env -echo "UID=$(id -u)" >> .env -echo "GID=$(id -g)" >> .env +```sh +COMPOSE_PROJECT_NAME=pecan +PECAN_VERSION=develop ``` -For MacOSX +If you have a Linux system you will need to set UID and GID (these are needed by rstudio when sharing files between host and container): -``` -echo "COMPOSE_PROJECT_NAME=pecan" >> .env -echo "PECAN_VERSION=develop" >> .env +```sh +echo "UID=$(id -u)" >> .env +echo "GID=$(id -g)" >> .env ``` -For Windows: - -``` -echo "COMPOSE_PROJECT_NAME=pecan" >> .env -echo "PECAN_VERSION=develop" >> .env -echo "PWD=%CD%" >> .env -``` +Later you may wish to modify other variables in `.env`, but for this intro please confirm that the system is working with this minimal configuration first.
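+If you want to double-check that Compose is picking up these values, one optional sanity check (just a suggestion; it is not required by the setup) is to render the resolved configuration and look at the image tags:
+
+```sh
+# The image tags in the resolved compose file should end in the
+# PECAN_VERSION you set in .env, e.g. pecan/executor:develop.
+docker compose config | grep 'image:'
+```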
Once you have set up `docker-compose.override.yml` and the `.env` files, it is time to pull all docker images that will be used. Doing this will make sure you have the latest version of those images on your local system. -``` -docker-compose pull +```sh +docker compose pull ``` -#### folders (optional) +#### Folders (optional) -The goal of the development is to share the development folder with your container, whilst minimizing the latency. What this will do is setup the folders to allow for your pecan folder to be shared, and keep the rest of the folders managed by docker. Some of this is based on a presentation done during [DockerCon 2020](https://docker.events.cube365.net/docker/dockercon/content/Videos/92BAM7vob5uQ2spZf). In this talk it is recommended to keep the database on the filesystem managed by docker, as well as any other folders that are not directly modified on the host system (not using the docker managed volumes could lead to a large speed loss when reading/writing to the disk). The `docker-compose.override.yml` can be modified to copy all the data to the local filesystem, by uncommenting the appropriate blocks. If you are sharing more than the pecan home directory you will need to make sure that these folder exist. As from the video, it is recommended to keep these folders outside of the actual pecan folder to allow for better caching capabilities of the docker system. +The goal of development is to share your development folder with the containers while minimizing latency. This will set up the folders so that your pecan folder is shared, and keep the rest of the folders managed by docker. Some of this is based on Dave Scott's DockerCon 2020 presentation ["New Docker Desktop Filesharing Features"](https://www.youtube.com/watch?v=gyddZyc8r48&t=512s). In this talk it is recommended to keep the database on the filesystem managed by docker, as well as any other folders that are not directly modified on the host system (not using the docker managed volumes could lead to a large speed loss when reading/writing to the disk). The `docker-compose.override.yml` can be modified to copy all the data to the local filesystem, by uncommenting the appropriate blocks. If you are sharing more than the pecan home directory you will need to make sure that these folders exist. As noted in the video, it is recommended to keep these folders outside of the actual pecan folder to allow for better caching capabilities of the docker system. If you have uncommented the volumes in `docker-compose.override.yml` you will need to create the folders. Assuming you have not modified the values, you can do this with: +```sh +mkdir -p $HOME/volumes/pecan/{R_library,pecan,portainer,postgres,rabbitmq,traefik} ``` -mkdir -p $HOME/volumes/pecan/{lib,pecan,portainer,postgres,rabbitmq,traefik} -``` - The following volumes are specified: - **pecan_home** : is the checked out folder of PEcAn. This is shared with the executor and rstudio container allowing you to share and compile PEcAn. (defaults to current folder) - **pecan_web** : is the checked out web folder of PEcAn. This is shared with the web container allowing you to share and modify the PEcAn web app. (defaults to web folder in the current folder) -- **pecan_lib** : holds all the R packages for the specific version of PEcAn and R. This folder will be shared amongst all other containers, and will contain the compiled PEcAn code.
(defaults to managed by docker, or $HOME/volumes/pecan/lib) +- **R_library** : holds all the R packages for the specific version of PEcAn and R. This folder will be shared amongst all other containers, and will contain the compiled PEcAn code. (defaults to managed by docker, or $HOME/volumes/pecan/R_library) - **pecan** this holds all the data, such as workflows and any downloaded data. (defaults to managed by docker, or $HOME/volumes/pecan/pecan) - **traefik** holds persistent data for the web proxy, that directs incoming traffic to the correct container. (defaults to managed by docker, or $HOME/volumes/pecan/traefik) - **postgres** holds the actual database data. If you want to back up the database, you can stop the postgres container and zip up the folder. (defaults to managed by docker, or $HOME/volumes/pecan/postgres) @@ -149,36 +130,37 @@ The following volumes are specified: These folders will hold all the persistent data for each of the respective containers and can grow. For example the postgres database is multiple GB. The pecan folder will hold all data produced by the workflows, including any downloaded data, and can grow to many gigabytes. +Note that the volume names shown here are the ones that appear in the compose file. When examining volumes in Docker, each name will have an additional `pecan_` prefix: `pecan_pecan_home`, `pecan_traefik`, and so on. + #### Postgresql database First we bring up postgresql (we will start RabbitMQ as well since it takes some time to start): -``` -docker-compose up -d postgres rabbitmq +```sh +docker compose up -d postgres rabbitmq ``` -This will start postgresql and rabbitmq. We need to wait for a few minutes (you can look at the logs using `docker-compose logs postgres`) to see if it is ready. +This will start postgresql and rabbitmq. We need to wait for a few minutes (you can look at the logs using `docker compose logs postgres`) to see if it is ready. Once the database has finished starting up we will initialize the database. Now you can load the database using the following commands. The first command will make sure we have the latest version of the image, the second command will actually load the information into the database. -``` +```sh docker pull pecan/db docker run --rm --network pecan_pecan pecan/db ``` - Once that is done we create two users for BETY; the first user is a guest user that you can use to log in to the BETY interface. The second user is a user with admin rights. -``` -docker-compose run --rm bety user guestuser guestuser "Guest User" guestuser@example.com 4 4 -docker-compose run --rm bety user carya illinois "Carya Demo User" carya@example.com 1 1 +```sh +docker compose run --rm bety user guestuser guestuser "Guest User" guestuser@example.com 4 4 +docker compose run --rm bety user carya illinois "Carya Demo User" carya@example.com 1 1 ``` #### Load example data Once the database is loaded we can add some example data; some of the example runs, and the runs for the ED model, assume this data is available. This can take some time, but all the data needed will be copied to the `/data` folder in the pecan containers. As with the database we first pull the latest version of the image, and then execute the image to copy all the data: -``` +```sh docker pull pecan/data:develop docker run -ti --rm --network pecan_pecan --volume pecan_pecan:/data --env FQDN=docker pecan/data:develop ``` @@ -198,51 +180,57 @@ Next copy the R packages from a container to volume `pecan_lib`.
This is not really required, but is recommended. You can copy all the data using the following command. This will copy all compiled packages to your local machine. +```bash +docker run -ti --rm -v pecan_R_library:/rlib pecan/base:develop cp -a /usr/local/lib/R/site-library/. /rlib/ ``` -docker run -ti --rm -v pecan_lib:/rlib pecan/base:develop cp -a /usr/local/lib/R/site-library/. /rlib/ + +If you have set a custom UID or GID in your `.env`, change ownership of these files as described above for the data volume. E.g. if you use the same UID in the containers as on your host machine, run: + +```bash +docker run -ti --rm -v pecan_R_library:/rlib pecan/base:develop chown -R "$(id -u):$(id -g)" /rlib/ ``` #### Copy web config file (optional) If you want to use the web interface, you will need to: -1. Uncomment the web section from the `docker-compose.override.yml` file. This section includes three lines at the top of the file, just under the `services` section. Uncomment the lines that start `web:`, ` volumes:`, and `- pecan_web:`. +1. Uncomment the web section from the `docker-compose.override.yml` file. This section includes three lines at the top of the file, just under the `services` section. Uncomment the lines that start `web:`, `volumes:`, and `- pecan_web:`. 2. Then copy the config.php from the docker/web folder. You can do this using For Linux/MacOSX -``` +```sh cp docker/web/config.docker.php web/config.php ``` For Windows -``` +```sh copy docker\web\config.docker.php web\config.php ``` -### PEcAn Development +## PEcAn Development Setup To begin development we first have to bring up the full PEcAn stack. This assumes you have done the steps above once. You don't need to stop any running containers; you can use the following command to start all containers. At this point you have PEcAn running in docker. -``` -docker-compose up -d +```sh +docker compose up -d ``` The current folder (most likely your clone of the git repository) is mounted in some containers as `/pecan`, and in the case of rstudio also in your home folder as `pecan`. You can see which containers exactly in `docker-compose.override.yml`. -You can now modify the code on your local machine, or you can use [rstudio](http://pecan.localhost) in the docker stack. Once you made changes to the code you can compile the code either in the terminal of rstudio (`cd pecan && make`) or using `./scripts/compile.sh` from your machine (latter is nothing more than a shell script that runs `docker-compose exec executor sh -c 'cd /pecan && make'`. +You can now modify the code on your local machine, or you can use [rstudio](http://pecan.localhost) in the docker stack. Once you have made changes to the code you can compile it either in the terminal of rstudio (`cd pecan && make`) or using `./scripts/compile.sh` from your machine (the latter is nothing more than a shell script that runs `docker compose exec executor sh -c 'cd /pecan && make'`). -The compiled code is written to `/usr/local/lib/R/site-library` which is mapped to `volumes/lib` on your machine. This same folder is mounted in many other containers, allowing you to share the same PEcAn modules in all containers. Now if you change a module, and compile all other containers will see and use this new version of your module. +The compiled code is written to `/usr/local/lib/R/site-library` which is mapped to `volumes/pecan/R_library` on your machine. This same folder is mounted in many other containers, allowing you to share the same PEcAn modules in all containers.
Now if you change a module and compile it, all other containers will see and use the new version of your module. -To compile the PEcAn code you can use the make command in either the rstudio container, or in the executor container. The script [`compile.sh`](sripts/compile.sh) will run make inside the executor container. +To compile the PEcAn code you can use the make command in either the rstudio container, or in the executor container. The script [`compile.sh`](scripts/compile.sh) will run make inside the executor container. ### Workflow Submission You can submit your workflow either in the executor container or in rstudio container. For example to run the `docker.sipnet.xml` workflow located in the tests folder you can use: -``` -docker-compose exec executor bash +```sh +docker compose exec executor bash # inside the container cd /pecan/tests R CMD ../web/workflow.R --settings docker.sipnet.xml @@ -250,13 +238,13 @@ R CMD ../web/workflow.R --settings docker.sipnet.xml A better way of doing this was developed as part of GSoC; you can leverage the RESTful interface, or use the new R PEcAn API package. -# PEcAn URLs +## PEcAn URLs -You can check the RabbitMQ server used by pecan using https://rabbitmq.pecan.localhost on the same server that the docker stack is running on. You can use rstudio either with http://server/rstudio or at http://rstudio.pecan.localhost. To check the traefik dashboard you can use http://traefik.pecan.localhost. +You can check the RabbitMQ server used by pecan using <https://rabbitmq.pecan.localhost> on the same server that the docker stack is running on. You can use rstudio either with <http://server/rstudio> or at <http://rstudio.pecan.localhost>. To check the traefik dashboard you can use <http://traefik.pecan.localhost>. -If the stack is running on a remote machine, you can use ssh and port forwarding to connect to the server. For example `ssh -L 8000:localhost:80` will allow you to use http://rabbitmq.pecan.localhost:8000/ in your browser to connect to the remote PEcAn server RabbitMQ. +If the stack is running on a remote machine, you can use ssh and port forwarding to connect to the server. For example `ssh -L 8000:localhost:80` will allow you to use <http://rabbitmq.pecan.localhost:8000/> in your browser to connect to the remote PEcAn server RabbitMQ. -# Directory Structure +## Directory Structure Following are the main folders inside the pecan repository. @@ -292,30 +280,30 @@ Some of the docker build files. The Dockerfiles for each model are placed in the Small scripts that are used as part of the development and installation of PEcAn. -# Advanced Development Options +## Advanced Development Options -## Reset all containers/database +### Reset all containers/database If you want to start from scratch and remove all old data, but keep your pecan checked out folder, you can remove the folders where you have written the data (see `folders` below). You will also need to remove any of the docker managed volumes. To see all volumes you can do `docker volume ls -q -f name=pecan`. If you are sure, you can either remove them one by one, or remove them all at once using the command below. **THIS DESTROYS ALL DATA IN DOCKER MANAGED VOLUMES.** -``` +```sh docker volume rm $(docker volume ls -q -f name=pecan) ``` If you changed the docker-compose.override.yml file to point to a location on disk for some of the containers (instead of having them managed by docker) you will need to actually delete the data on your local disk; docker will NOT do this.
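+If you want to keep a copy of the database before wiping the volumes, one possible approach (a sketch, assuming the default project name so the volume is named `pecan_postgres`) is to archive the volume contents with a throwaway container:
+
+```sh
+# Stop the database so its files are quiescent, archive the docker-managed
+# volume into the current directory, then restart the database.
+docker compose stop postgres
+docker run --rm -v pecan_postgres:/data -v "$PWD":/backup \
+  alpine tar czf /backup/pecan-postgres-backup.tar.gz -C /data .
+docker compose start postgres
+```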
-## Reset the lib folder +## Reset the R_library folder -If you want to reset the pecan lib folder that is mounted across all machines, for example when there is a new version of PEcAn or a a new version of R, you will need to delete the volume pecan_lib, and repopulate it. To delete the volume use the following command, and then look at "copy R packages" to copy the data again. +If you want to reset the R library folder that is mounted across all machines, for example when there is a new version of PEcAn or a new version of R, you will need to delete the volume pecan_R_library, and repopulate it. To delete the volume use the following command, and then look at "copy R packages" to copy the data again. -``` -docker-compose down -docker volume rm pecan_lib +```sh +docker compose down +docker volume rm pecan_R_library ``` ## Linux and User permissions -(On Mac OSX and Windows files should automatically be owned by the user running the docker-compose commands). +(On Mac OSX and Windows files should automatically be owned by the user running the docker compose commands). If you use mounted folders, make sure that these folders are writable by the containers. Docker on Linux will try to preserve the file permissions. To do this it might be necessary for the folders to have rw permissions. This can be done by using `chmod 777 $HOME/volumes/pecan/{lib,pecan,portainer,postgres,rabbitmq,traefik}`. @@ -323,19 +311,19 @@ This will leverage of NFS to mount the file system in your local docker image, c First install nfs server: -``` +```sh apt-get install nfs-kernel-server ``` Next export your home directory: -``` +```sh echo -e "$PWD\t127.0.0.1(rw,no_subtree_check,all_squash,anonuid=$(id -u),anongid=$(id -g))" | sudo tee -a /etc/exports ``` And export the filesystem. -``` +```sh sudo exportfs -va ``` @@ -343,7 +331,7 @@ At this point you have exported your home directory, only to your local machine. Finally we can modify the `docker-compose.override.yml` file to allow for writing files to your PEcAn folder as you: -``` +```sh volumes: pecan_home: driver_opts: diff --git a/LICENSE b/LICENSE index 5a9e44128f1..91ed7f17c2b 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,39 @@ -## This is the master copy of the PEcAn License +All portions of the PEcAn project not marked otherwise are distributed under the BSD 3-clause license: -University of Illinois/NCSA Open Source License +Copyright (c) 2012-2024 Pecan Project + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors +may be used to endorse or promote products derived from this software without +specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS” AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + + +---------------------------------- + + +Portions of the PEcAn project are distributed under the University of Illinois/NCSA Open Source License: Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. @@ -32,3 +65,7 @@ ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. + + + +-------------------- diff --git a/Makefile b/Makefile index f00790f18fb..db701ada644 100644 --- a/Makefile +++ b/Makefile @@ -6,8 +6,8 @@ MODELS := basgra biocro clm45 dalec dvmdostem ed fates gday jules linkages \ ldndc lpjguess maat maespa preles sibcasa sipnet stics template MODULES := allometry assim.batch assim.sequential benchmark \ - data.atmosphere data.hydrology data.land \ - data.remote emulator meta.analysis \ + data.atmosphere data.land data.remote \ + emulator meta.analysis \ photosynthesis priors rtm uncertainty # Components not currently included in the build @@ -46,7 +46,7 @@ ALL_PKGS_D := $(BASE_D) $(MODULES_D) $(MODELS_D) SETROPTIONS := "options(Ncpus = ${NCPUS})" -EXPECTED_ROXYGEN_VERSION := 7.2.3 +EXPECTED_ROXYGEN_VERSION := 7.3.2 INSTALLED_ROXYGEN_VERSION := $(shell Rscript \ -e "if (requireNamespace('roxygen2', quietly = TRUE)) {" \ -e "cat(as.character(packageVersion('roxygen2')))" \ @@ -67,6 +67,11 @@ drop_parents = $(filter-out $(patsubst %/,%,$(dir $1)), $1) # Generates a list of regular files at any depth inside its argument files_in_dir = $(call drop_parents, $(call recurse_dir, $1)) +# Git hash + clean status for this directory +git_rev = $(shell \ + CLEAN=$$([[ -n $$(git status -s $1) ]] && echo "+mod"); \ + echo $$(git rev-parse --short=10 HEAD)"$$CLEAN") + # HACK: NA vs TRUE switch on dependencies argument is an ugly workaround for # a circular dependency between benchmark and data.land. 
# When this is fixed, can go back to simple `dependencies = TRUE` @@ -74,7 +79,8 @@ depends_R_pkg = ./scripts/time.sh "depends ${1}" ./scripts/confirm_deps.R ${1} \ $(if $(findstring modules/benchmark,$(1)),NA,TRUE) install_R_pkg = ./scripts/time.sh "install ${1}" Rscript \ -e ${SETROPTIONS} \ - -e "devtools::install('$(strip $(1))', upgrade=FALSE)" + -e "Sys.setenv(PECAN_GIT_REV='$(call git_rev,$1)')" \ + -e "remotes::install_local('$(strip $(1))', force=TRUE, dependencies=FALSE, upgrade=FALSE)" check_R_pkg = ./scripts/time.sh "check ${1}" Rscript scripts/check_with_errors.R $(strip $(1)) test_R_pkg = ./scripts/time.sh "test ${1}" Rscript \ -e "devtools::test('$(strip $(1))'," \ @@ -96,10 +102,19 @@ depends = .doc/$(1) .install/$(1) .check/$(1) .test/$(1) ### Rules -.PHONY: all install check test document shiny +.PHONY: all install check test document shiny \ + check_base check_models check_modules all: install document + +check_base: $(BASE_C) +check_models: $(MODELS_C) + +# Install base first as Modules has a circular dependency on base, +# and then run a check on modules +check_modules: $(BASE_I) $(MODULES_C) + document: $(ALL_PKGS_D) .doc/base/all install: $(ALL_PKGS_I) .install/base/all check: $(ALL_PKGS_C) .check/base/all @@ -123,12 +138,6 @@ $(subst .doc/models/template,,$(MODELS_D)): .install/models/template ### Order-only dependencies # (i.e. prerequisites must exist before building target, but # target need not be rebuilt when a prerequisite changes) - -.doc/base/all: | $(ALL_PKGS_D) -.install/base/all: | $(ALL_PKGS_I) -.check/base/all: | $(ALL_PKGS_C) -.test/base/all: | $(ALL_PKGS_T) - include Makefile.depends clean: @@ -143,10 +152,11 @@ clean: .install/roxygen2: | .install .install/devtools + ./scripts/time.sh "roxygen2 ${1}" Rscript -e ${SETROPTIONS} \ -e "if (!requireNamespace('roxygen2', quietly = TRUE)" \ - -e " || packageVersion('roxygen2') != '7.2.3') {" \ - -e " devtools::install_github('r-lib/roxygen2@v7.2.3')" \ + -e " || packageVersion('roxygen2') != '"${EXPECTED_ROXYGEN_VERSION}"') {" \ + -e " cran <- c(getOption('repos'), 'cloud.r-project.org')" \ + -e " remotes::install_version('roxygen2', '"${EXPECTED_ROXYGEN_VERSION}"', repos = cran, upgrade = FALSE)" \ -e "}" - $(eval INSTALLED_ROXYGEN_VERSION := 7.2.3) + $(eval INSTALLED_ROXYGEN_VERSION := ${EXPECTED_ROXYGEN_VERSION}) echo `date` > $@ .install/testthat: | .install diff --git a/Makefile.depends b/Makefile.depends index 652db3c287a..977e82b8dc0 100644 --- a/Makefile.depends +++ b/Makefile.depends @@ -1,44 +1,40 @@ # autogenerated -$(call depends,base/all): | .install/base/db .install/base/settings .install/modules/meta.analysis .install/base/logger .install/base/utils .install/modules/uncertainty .install/modules/data.atmosphere .install/modules/data.land .install/modules/data.remote .install/modules/assim.batch .install/modules/emulator .install/modules/priors .install/modules/benchmark .install/base/remote .install/base/workflow .install/models/ed .install/models/sipnet .install/models/biocro .install/models/dalec .install/models/linkages .install/modules/allometry .install/modules/photosynthesis +$(call depends,base/all): | .install/base/db .install/base/logger .install/base/remote .install/base/settings .install/base/utils .install/base/workflow .install/models/biocro .install/models/dalec .install/models/ed .install/models/linkages .install/models/sipnet .install/modules/allometry .install/modules/assim.batch .install/modules/benchmark .install/modules/data.atmosphere .install/modules/data.land 
.install/modules/data.remote .install/modules/emulator .install/modules/meta.analysis .install/modules/photosynthesis .install/modules/priors .install/modules/uncertainty $(call depends,base/db): | .install/base/logger .install/base/remote .install/base/utils -$(call depends,base/logger): | -$(call depends,base/qaqc): | .install/base/db .install/base/logger .install/models/biocro .install/models/ed .install/models/sipnet .install/base/utils +$(call depends,base/qaqc): | .install/base/db .install/base/logger .install/base/utils .install/models/biocro .install/models/ed .install/models/sipnet $(call depends,base/remote): | .install/base/logger $(call depends,base/settings): | .install/base/db .install/base/logger .install/base/remote .install/base/utils $(call depends,base/utils): | .install/base/logger -$(call depends,base/visualization): | .install/base/db .install/base/logger .install/base/utils -$(call depends,base/workflow): | .install/modules/data.atmosphere .install/modules/data.land .install/base/db .install/base/logger .install/base/remote .install/base/settings .install/modules/uncertainty .install/base/utils -$(call depends,modules/allometry): | .install/base/db -$(call depends,modules/assim.batch): | .install/modules/benchmark .install/base/db .install/modules/emulator .install/base/logger .install/modules/meta.analysis .install/base/remote .install/base/settings .install/modules/uncertainty .install/base/utils .install/base/workflow -$(call depends,modules/assim.sequential): | .install/base/db .install/base/logger .install/base/remote .install/base/settings .install/modules/uncertainty .install/base/workflow .install/modules/benchmark .install/modules/data.remote -$(call depends,modules/benchmark): | .install/base/db .install/base/logger .install/base/remote .install/base/settings .install/base/utils .install/modules/data.land -$(call depends,modules/data.atmosphere): | .install/base/db .install/base/logger .install/base/remote .install/base/utils -$(call depends,modules/data.hydrology): | .install/base/logger .install/base/utils -$(call depends,modules/data.land): | .install/modules/benchmark .install/modules/data.atmosphere .install/base/db .install/base/logger .install/base/remote .install/base/utils .install/base/visualization .install/base/settings -$(call depends,modules/data.remote): | .install/base/db .install/base/utils .install/base/logger .install/base/remote -$(call depends,modules/emulator): | -$(call depends,modules/meta.analysis): | .install/base/utils .install/base/db .install/base/logger .install/base/settings -$(call depends,modules/photosynthesis): | -$(call depends,modules/priors): | .install/base/logger .install/modules/meta.analysis .install/base/utils .install/base/visualization -$(call depends,modules/rtm): | .install/base/logger .install/modules/assim.batch .install/base/utils .install/models/ed -$(call depends,modules/uncertainty): | .install/base/db .install/modules/emulator .install/base/logger .install/modules/priors .install/base/settings .install/base/utils -$(call depends,models/basgra): | .install/base/logger .install/modules/data.atmosphere .install/base/utils -$(call depends,models/biocro): | .install/base/logger .install/base/remote .install/base/utils .install/base/settings .install/modules/data.atmosphere .install/modules/data.land .install/base/db +$(call depends,base/visualization): | .install/base/logger +$(call depends,base/workflow): | .install/base/db .install/base/logger .install/base/remote .install/base/settings .install/base/utils 
.install/modules/data.atmosphere .install/modules/data.land .install/modules/uncertainty +$(call depends,models/basgra): | .install/base/logger .install/base/utils .install/modules/data.atmosphere +$(call depends,models/biocro): | .install/base/db .install/base/logger .install/base/remote .install/base/settings .install/base/utils .install/modules/data.atmosphere .install/modules/data.land $(call depends,models/cable): | .install/base/logger .install/base/utils $(call depends,models/clm45): | .install/base/logger .install/base/utils $(call depends,models/dalec): | .install/base/logger .install/base/remote .install/base/utils $(call depends,models/dvmdostem): | .install/base/logger .install/base/utils -$(call depends,models/ed): | .install/modules/data.atmosphere .install/modules/data.land .install/base/logger .install/base/remote .install/base/settings .install/base/utils +$(call depends,models/ed): | .install/base/logger .install/base/remote .install/base/settings .install/base/utils .install/modules/data.atmosphere .install/modules/data.land $(call depends,models/fates): | .install/base/logger .install/base/remote .install/base/utils -$(call depends,models/gday): | .install/base/utils .install/base/logger .install/base/remote -$(call depends,models/jules): | .install/modules/data.atmosphere .install/base/logger .install/base/remote .install/base/utils -$(call depends,models/ldndc): | .install/base/db .install/base/logger .install/base/utils .install/base/remote .install/modules/data.atmosphere -$(call depends,models/linkages): | .install/base/utils .install/modules/data.atmosphere .install/base/logger .install/base/remote +$(call depends,models/gday): | .install/base/logger .install/base/remote .install/base/utils +$(call depends,models/jules): | .install/base/logger .install/base/remote .install/base/utils .install/modules/data.atmosphere +$(call depends,models/ldndc): | .install/base/logger .install/base/remote .install/base/utils .install/modules/data.atmosphere .install/modules/data.land +$(call depends,models/linkages): | .install/base/db .install/base/logger .install/base/remote .install/base/utils .install/modules/data.land $(call depends,models/lpjguess): | .install/base/logger .install/base/remote .install/base/utils -$(call depends,models/maat): | .install/modules/data.atmosphere .install/base/logger .install/base/remote .install/base/settings .install/base/utils -$(call depends,models/maespa): | .install/modules/data.atmosphere .install/base/logger .install/base/remote .install/base/utils -$(call depends,models/preles): | .install/base/utils .install/base/logger .install/modules/data.atmosphere .install/base/utils -$(call depends,models/sibcasa): | .install/base/logger .install/base/utils -$(call depends,models/sipnet): | .install/modules/data.atmosphere .install/base/logger .install/base/remote .install/base/utils -$(call depends,models/stics): | .install/base/settings .install/base/db .install/base/logger .install/base/utils .install/base/remote +$(call depends,models/maat): | .install/base/logger .install/base/remote .install/base/settings .install/base/utils .install/modules/data.atmosphere +$(call depends,models/maespa): | .install/base/logger .install/base/remote .install/base/utils .install/modules/data.atmosphere +$(call depends,models/preles): | .install/base/logger .install/base/utils .install/modules/data.atmosphere +$(call depends,models/sibcasa): | .install/base/logger +$(call depends,models/sipnet): | .install/base/logger .install/base/remote .install/base/utils 
.install/modules/data.atmosphere .install/modules/data.land +$(call depends,models/stics): | .install/base/logger .install/base/remote .install/base/settings .install/base/utils $(call depends,models/template): | .install/base/db .install/base/logger .install/base/utils +$(call depends,modules/allometry): | .install/base/db +$(call depends,modules/assim.batch): | .install/base/db .install/base/logger .install/base/remote .install/base/settings .install/base/utils .install/base/workflow .install/modules/benchmark .install/modules/emulator .install/modules/meta.analysis .install/modules/uncertainty +$(call depends,modules/assim.sequential): | .install/base/db .install/base/logger .install/base/remote .install/base/settings .install/base/utils .install/base/visualization .install/base/workflow .install/modules/benchmark .install/modules/data.land .install/modules/data.remote .install/modules/uncertainty +$(call depends,modules/benchmark): | .install/base/db .install/base/logger .install/base/settings .install/base/utils .install/modules/data.land +$(call depends,modules/data.atmosphere): | .install/base/db .install/base/logger .install/base/remote .install/base/settings .install/base/utils +$(call depends,modules/data.land): | .install/base/db .install/base/logger .install/base/remote .install/base/settings .install/base/utils .install/base/visualization .install/modules/benchmark +$(call depends,modules/data.remote): | .install/base/db .install/base/logger .install/base/remote .install/base/utils +$(call depends,modules/meta.analysis): | .install/base/db .install/base/logger .install/base/settings .install/base/utils +$(call depends,modules/priors): | .install/base/logger .install/base/utils .install/base/visualization .install/modules/meta.analysis +$(call depends,modules/rtm): | .install/base/logger .install/base/utils .install/models/ed .install/modules/assim.batch +$(call depends,modules/uncertainty): | .install/base/db .install/base/logger .install/base/settings .install/base/utils .install/modules/emulator .install/modules/priors diff --git a/README.md b/README.md index 838793ba7ad..c30370067e5 100644 --- a/README.md +++ b/README.md @@ -1,16 +1,15 @@ [![GitHub Actions CI](https://github.com/PecanProject/pecan/workflows/CI/badge.svg)](https://github.com/PecanProject/pecan/actions) -[![Slack](https://img.shields.io/badge/slack-login-green.svg)](https://pecanproject.slack.com/) +[![Slack](https://img.shields.io/badge/slack-login-green.svg)](https://pecanproject.slack.com/) [![Slack](https://img.shields.io/badge/slack-join_chat-green.svg)](https://join.slack.com/t/pecanproject/shared_invite/enQtMzkyODUyMjQyNTgzLWEzOTM1ZjhmYWUxNzYwYzkxMWVlODAyZWQwYjliYzA0MDA0MjE4YmMyOTFhMjYyMjYzN2FjODE4N2Y4YWFhZmQ) [![DOI](https://zenodo.org/badge/4469/PecanProject/pecan.svg)](https://zenodo.org/badge/latestdoi/4469/PecanProject/pecan) - - ## Our Vision + #### Ecosystem science, policy, and management informed by the best available data and models ## Our Mission -#### Develop and promote accessible tools for reproducible ecosystem modeling and forecasting +#### Develop and promote accessible tools for reproducible ecosystem modeling and forecasting ## What is PEcAn? 
@@ -22,7 +21,7 @@ PEcAn is not itself an ecosystem model, and it can be used to with a variety of ## Documentation -Consult documentation of the PEcAn Project; either the [lastest stable development](https://pecanproject.github.io/pecan-documentation/develop/) branch, the latest [release](https://pecanproject.github.io/pecan-documentation/master/). Documentation from [earlier releases is here](https://pecanproject.github.io/documentation.html). +Consult the documentation of the PEcAn Project: either the [latest stable development](https://pecanproject.github.io/pecan-documentation/develop/) branch or the latest [release](https://pecanproject.github.io/pecan-documentation/latest/). Documentation from [earlier releases is here](https://pecanproject.github.io/documentation.html). ## Getting Started @@ -31,25 +30,30 @@ See our ["Tutorials Page"](https://pecanproject.github.io/tutorials.html) that p ### Installation Complete instructions on how to install PEcAn can be found in the [documentation here](https://pecanproject.github.io/pecan-documentation/develop/pecan-manual-setup.html). To get PEcAn up and running you can use one of three methods: -1. Run a [Virtual Machine](https://pecanproject.github.io/pecan-documentation/develop/pecan-manual-setup.html#install-vm). This is recommended for students and new users, and provides a consistent, tested environment for each release. -2. Use [Docker](https://pecanproject.github.io/pecan-documentation/develop/pecan-manual-setup.html#install-docker). This is recommended, especially for development and production deployment. -3. Install all of the PEcAn R packages on your own Linux or MacOS computer or server. This can be done by [installing from r-universe](https://pecanproject.github.io/pecan-documentation/develop/r-universe.html): -``` r + +1. Run a [Virtual Machine](https://pecanproject.github.io/pecan-documentation/develop/install-vm.html#install-vm). This is recommended for students and new users, and provides a consistent, tested environment for each release. + +2. Use [Docker](https://pecanproject.github.io/pecan-documentation/develop/install-docker.html#install-docker). This is recommended, especially for development and production deployment. + +3. Install all of the PEcAn R packages on your own Linux or MacOS computer or server. This can be done by [installing from r-universe](https://pecanproject.github.io/pecan-documentation/develop/r-universe.html): + +```R # Enable repository from pecanproject options(repos = c( pecanproject = 'https://pecanproject.r-universe.dev', CRAN = 'https://cloud.r-project.org')) # Download and install PEcAn.all in R install.packages('PEcAn.all') +``` -``` This, however, may have limited functionality without also installing other components of PEcAn, in particular [BETYdb](https://pecanproject.github.io/pecan-documentation/develop/osinstall.html#install-bety). ### Website -Visit our [webage](https://pecanproject.github.io) to keep up with latest news, version, and information about the PEcAn Project +Visit our [webpage](https://pecanproject.github.io) to keep up with the latest news, versions, and information about the PEcAn Project. #### Web Interface demo + The fastest way to begin modeling ecosystems is through the PEcAn web interface. We have a [demo website](http://pecan.ncsa.illinois.edu/pecan/01-introduction.php) that runs the current version of PEcAn. Using this instance you can perform a run using either ED or SIPNET at any of the predefined sites.
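As a companion to the r-universe snippet in the Installation section above, individual PEcAn packages can be installed from the same repositories when the full `PEcAn.all` meta-package is not needed. A minimal sketch; `PEcAn.SIPNET` is chosen purely as an illustrative package name:

```R
# Enable the same repositories as in the README snippet above
options(repos = c(
  pecanproject = 'https://pecanproject.r-universe.dev',
  CRAN = 'https://cloud.r-project.org'))

# Install a single model-interface package instead of the PEcAn.all meta-package
install.packages('PEcAn.SIPNET')
```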
@@ -82,12 +86,16 @@ University of Illinois/NCSA Open Source License Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. PEcAn project -www.pecanproject.org + Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal with the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -- Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimers. -- Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimers in the documentation and/or other materials provided with the distribution. -- Neither the names of University of Illinois, NCSA, nor the names of its contributors may be used to endorse or promote products derived from this Software without specific prior written permission. +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimers. +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimers in the documentation and/or other materials provided with the distribution. +* Neither the names of University of Illinois, NCSA, nor the names of its contributors may be used to endorse or promote products derived from this Software without specific prior written permission. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. 
+ +## Activities + +![Alt](https://repobeats.axiom.co/api/embed/9d39b0af80fbfa979e349a529c05f21bbac9f858.svg "Repobeats analytics image") diff --git a/apps/api/Dockerfile b/apps/api/Dockerfile index 4aa5a6a2d9a..059e14f0c0c 100644 --- a/apps/api/Dockerfile +++ b/apps/api/Dockerfile @@ -6,7 +6,6 @@ ARG IMAGE_VERSION="latest" # PECAN FOR MODEL BASE IMAGE # -------------------------------------------------------------------------- FROM pecan/base:${IMAGE_VERSION} -LABEL maintainer="Tezan Sahu " EXPOSE 8000 @@ -15,9 +14,7 @@ EXPOSE 8000 # -------------------------------------------------------------------------- # COMMAND TO RUN -RUN --mount=type=secret,id=github_token \ - export GITHUB_PAT=`cat /run/secrets/github_token` \ - && apt-get update \ +RUN apt-get update \ && apt-get install libsodium-dev -y \ && rm -rf /var/lib/apt/lists/* \ && Rscript -e "devtools::install_version('promises', '1.1.0')" \ @@ -35,7 +32,6 @@ ENV AUTH_REQ="TRUE" \ SECRET_KEY_BASE="thisisnotasecret" WORKDIR /api/R - -CMD Rscript entrypoint.R - COPY ./ /api + +CMD ["Rscript", "entrypoint.R"] diff --git a/apps/api/R/submit.workflow.R b/apps/api/R/submit.workflow.R index ace1c2896f7..34d34b3853d 100644 --- a/apps/api/R/submit.workflow.R +++ b/apps/api/R/submit.workflow.R @@ -124,7 +124,7 @@ submit.workflow.list <- function(workflowList, userDetails) { # Post workflow to RabbitMQ message <- list(folder = outdir, workflowid = workflow_id_str) - res <- PEcAn.remote::rabbitmq_post_message(workflowList$host$rabbitmq$uri, "pecan", message, "rabbitmq") + res <- PEcAn.remote::rabbitmq_post_message(workflowList$host$rabbitmq$uri, "pecan", message) if(res$routed){ return(list(workflow_id = workflow_id_str, status = "Submitted successfully")) diff --git a/apps/api/pecanapi-spec.yml b/apps/api/pecanapi-spec.yml index b6ea7e3a757..f06d3dfde0b 100644 --- a/apps/api/pecanapi-spec.yml +++ b/apps/api/pecanapi-spec.yml @@ -18,8 +18,8 @@ info: contact: email: "pecanproj@gmail.com" license: - name: University of Illinois/NCSA Open Source License - url: https://opensource.org/licenses/NCSA + name: BSD-3 + url: https://opensource.org/license/bsd-3-clause externalDocs: description: Find out more about PEcAn Project url: https://pecanproject.github.io/ diff --git a/apps/api/tests/test.auth.R b/apps/api/tests/test.auth.R index 20fe35ef213..4ced4f5b05a 100644 --- a/apps/api/tests/test.auth.R +++ b/apps/api/tests/test.auth.R @@ -2,7 +2,7 @@ context("Testing authentication for API") test_that("Using correct username & password returns Status 200", { res <- httr::GET( - "http://localhost:8000/api/models/", + "http://pecan.localhost/api/models/", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 200) @@ -10,7 +10,7 @@ test_that("Using correct username & password returns Status 200", { test_that("Using incorrect username & password returns Status 401", { res <- httr::GET( - "http://localhost:8000/api/models/", + "http://pecan.localhost/api/models/", httr::authenticate("carya", "wrong_password") ) expect_equal(res$status, 401) @@ -18,7 +18,7 @@ test_that("Using incorrect username & password returns Status 401", { test_that("Not using username & password returns Status 401", { res <- httr::GET( - "http://localhost:8000/api/models/", + "http://pecan.localhost/api/models/", ) expect_equal(res$status, 401) }) \ No newline at end of file diff --git a/apps/api/tests/test.formats.R b/apps/api/tests/test.formats.R index 6261b711bf5..10ed4c4ceb8 100644 --- a/apps/api/tests/test.formats.R +++ b/apps/api/tests/test.formats.R @@ -2,7 +2,7 @@ 
context("Testing all formats related endpoints") test_that("Calling /api/formats/ with valid parameters returns Status 200", { res <- httr::GET( - "http://localhost:8000/api/formats/?format_name=ameriflux&mimetype=csv&ignore_case=TRUE", + "http://pecan.localhost/api/formats/?format_name=ameriflux&mimetype=csv&ignore_case=TRUE", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 200) @@ -10,7 +10,7 @@ test_that("Calling /api/formats/ with valid parameters returns Status 200", { test_that("Calling /api/formats/ with invalid parameters returns Status 404", { res <- httr::GET( - "http://localhost:8000/api/formats/?format_name=random&mimetype=random&ignore_case=TRUE", + "http://pecan.localhost/api/formats/?format_name=random&mimetype=random&ignore_case=TRUE", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 404) @@ -18,7 +18,7 @@ test_that("Calling /api/formats/ with invalid parameters returns Status 404", { test_that("Calling /api/formats/{format_id} returns Status 200", { res <- httr::GET( - "http://localhost:8000/api/formats/19", + "http://pecan.localhost/api/formats/19", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 200) @@ -26,7 +26,7 @@ test_that("Calling /api/formats/{format_id} returns Status 200", { test_that("Calling /api/formats/{format_id} with invalid parameters returns Status 404", { res <- httr::GET( - "http://localhost:8000/api/formats/0", + "http://pecan.localhost/api/formats/0", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 404) diff --git a/apps/api/tests/test.inputs.R b/apps/api/tests/test.inputs.R index cba2ee9e792..106bca749b3 100644 --- a/apps/api/tests/test.inputs.R +++ b/apps/api/tests/test.inputs.R @@ -2,7 +2,7 @@ context("Testing all inputs related endpoints") test_that("Calling /api/inputs/ with valid parameters returns Status 200", { res <- httr::GET( - "http://localhost:8000/api/inputs/?model_id=1000000022&site_id=676", + "http://pecan.localhost/api/inputs/?model_id=1000000022&site_id=676", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 200) @@ -10,7 +10,7 @@ test_that("Calling /api/inputs/ with valid parameters returns Status 200", { test_that("Calling /api/inputs/ with invalid parameters returns Status 404", { res <- httr::GET( - "http://localhost:8000/api/inputs/?model_id=0&site_id=0", + "http://pecan.localhost/api/inputs/?model_id=0&site_id=0", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 404) @@ -18,7 +18,7 @@ test_that("Calling /api/inputs/ with invalid parameters returns Status 404", { test_that("Calling /api/inputs/{input_id} with valid parameters returns Status 200", { res <- httr::GET( - paste0("http://localhost:8000/api/inputs/", 99000000003), + paste0("http://pecan.localhost/api/inputs/", 99000000003), httr::authenticate("carya", "illinois") ) expect_equal(res$status, 200) @@ -26,7 +26,7 @@ test_that("Calling /api/inputs/{input_id} with valid parameters returns Status 2 test_that("Calling /api/inputs/{input_id} with invalid parameters returns Status 404", { res <- httr::GET( - "http://localhost:8000/api/inputs/0", + "http://pecan.localhost/api/inputs/0", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 404) @@ -34,7 +34,7 @@ test_that("Calling /api/inputs/{input_id} with invalid parameters returns Status test_that("Calling /api/inputs/{input_id}?filename={filename} with valid parameters returns Status 200", { res <- httr::GET( - paste0("http://localhost:8000/api/inputs/295?filename=fraction.plantation"), + 
paste0("http://pecan.localhost/api/inputs/295?filename=fraction.plantation"), httr::authenticate("carya", "illinois") ) expect_equal(res$status, 200) @@ -42,7 +42,7 @@ test_that("Calling /api/inputs/{input_id}?filename={filename} with valid paramet test_that("Calling /api/inputs/{input_id}?filename={filename} with invalid parameters returns Status 404", { res <- httr::GET( - "http://localhost:8000/api/inputs/295?filename=random", + "http://pecan.localhost/api/inputs/295?filename=random", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 400) diff --git a/apps/api/tests/test.models.R b/apps/api/tests/test.models.R index d1be63b006f..d3dcff2b6ec 100644 --- a/apps/api/tests/test.models.R +++ b/apps/api/tests/test.models.R @@ -2,7 +2,7 @@ context("Testing all models endpoints") test_that("Calling /api/models/ returns Status 200", { res <- httr::GET( - "http://localhost:8000/api/models/?model_name=SIPNET&revision=ssr", + "http://pecan.localhost/api/models/?model_name=SIPNET&revision=ssr", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 200) @@ -10,7 +10,7 @@ test_that("Calling /api/models/ returns Status 200", { test_that("Calling /api/models/ with invalid parameters returns Status 404", { res <- httr::GET( - "http://localhost:8000/api/models/?model_name=random&revision=random", + "http://pecan.localhost/api/models/?model_name=random&revision=random", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 404) @@ -18,7 +18,7 @@ test_that("Calling /api/models/ with invalid parameters returns Status 404", { test_that("Calling /api/models/{model_id} returns Status 200", { res <- httr::GET( - "http://localhost:8000/api/models/1000000014", + "http://pecan.localhost/api/models/1000000014", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 200) @@ -26,7 +26,7 @@ test_that("Calling /api/models/{model_id} returns Status 200", { test_that("Calling /api/models/{model_id} with invalid parameters returns Status 404", { res <- httr::GET( - "http://localhost:8000/api/models/1", + "http://pecan.localhost/api/models/1", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 404) diff --git a/apps/api/tests/test.pfts.R b/apps/api/tests/test.pfts.R index b4de5cbe393..7931c324938 100644 --- a/apps/api/tests/test.pfts.R +++ b/apps/api/tests/test.pfts.R @@ -2,7 +2,7 @@ context("Testing all PFTs endpoints") test_that("Calling /api/pfts/ returns Status 200", { res <- httr::GET( - "http://localhost:8000/api/pfts/?pft_name=temperate&pft_type=plant&model_type=sipnet", + "http://pecan.localhost/api/pfts/?pft_name=temperate&pft_type=plant&model_type=sipnet", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 200) @@ -10,7 +10,7 @@ test_that("Calling /api/pfts/ returns Status 200", { test_that("Calling /api/pfts/ with invalid parameters returns Status 404", { res <- httr::GET( - "http://localhost:8000/api/pfts/?pft_name=random&model_type=random", + "http://pecan.localhost/api/pfts/?pft_name=random&model_type=random", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 404) @@ -18,7 +18,7 @@ test_that("Calling /api/pfts/ with invalid parameters returns Status 404", { test_that("Calling /api/pfts/{pft_id} returns Status 200", { res <- httr::GET( - "http://localhost:8000/api/pfts/2000000045", + "http://pecan.localhost/api/pfts/2000000045", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 200) @@ -26,7 +26,7 @@ test_that("Calling /api/pfts/{pft_id} returns Status 200", { test_that("Calling 
/api/pfts/{pft_id} with invalid parameters returns Status 404", { res <- httr::GET( - "http://localhost:8000/api/pfts/0", + "http://pecan.localhost/api/pfts/0", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 404) diff --git a/apps/api/tests/test.ping.R b/apps/api/tests/test.ping.R index defd30bdb45..fc8ec0bf97f 100644 --- a/apps/api/tests/test.ping.R +++ b/apps/api/tests/test.ping.R @@ -1,6 +1,6 @@ context("Testing the /api/ping endpoint") test_that("Calling /api/ping returns Status 200", { - res <- httr::GET("http://localhost:8000/api/ping") + res <- httr::GET("http://pecan.localhost/api/ping") expect_equal(res$status, 200) }) \ No newline at end of file diff --git a/apps/api/tests/test.runs.R b/apps/api/tests/test.runs.R index f88e8977f82..2c67da5a2a6 100644 --- a/apps/api/tests/test.runs.R +++ b/apps/api/tests/test.runs.R @@ -2,7 +2,7 @@ context("Testing all runs endpoints") test_that("Calling /api/runs/ with a valid workflow id returns Status 200", { res <- httr::GET( - "http://localhost:8000/api/runs/?workflow_id=1000009172", + "http://pecan.localhost/api/runs/?workflow_id=1000009172", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 200) @@ -12,7 +12,7 @@ test_that("Calling /api/runs/ with a valid workflow id returns Status 200", { test_that("Calling /api/runs/{id} with a valid run id returns Status 200", { res <- httr::GET( - "http://localhost:8000/api/runs/1002042201", + "http://pecan.localhost/api/runs/1002042201", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 200) @@ -20,7 +20,7 @@ test_that("Calling /api/runs/{id} with a valid run id returns Status 200", { test_that("Calling /api/runs/ with a invalid workflow id returns Status 404", { res <- httr::GET( - "http://localhost:8000/api/runs/?workflow_id=1000000000", + "http://pecan.localhost/api/runs/?workflow_id=1000000000", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 404) @@ -28,7 +28,7 @@ test_that("Calling /api/runs/ with a invalid workflow id returns Status 404", { test_that("Calling /api/runs/{id} with a invalid run id returns Status 404", { res <- httr::GET( - "http://localhost:8000/api/runs/1000000000", + "http://pecan.localhost/api/runs/1000000000", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 404) @@ -36,7 +36,7 @@ test_that("Calling /api/runs/{id} with a invalid run id returns Status 404", { test_that("Calling /api/runs/{run_id}/graph/{year}/{yvar}/ with valid inputs returns Status 200", { res <- httr::GET( - "http://localhost:8000/api/runs/99000000282/graph/2002/GPP", + "http://pecan.localhost/api/runs/99000000282/graph/2002/GPP", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 200) @@ -44,7 +44,7 @@ test_that("Calling /api/runs/{run_id}/graph/{year}/{yvar}/ with valid inputs ret test_that("Calling /api/runs/{run_id}/graph/{year}/{yvar}/ with valid inputs returns Status 200", { res <- httr::GET( - "http://localhost:8000/api/runs/1000000000/graph/100/GPP", + "http://pecan.localhost/api/runs/1000000000/graph/100/GPP", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 404) @@ -52,7 +52,7 @@ test_that("Calling /api/runs/{run_id}/graph/{year}/{yvar}/ with valid inputs ret test_that("Calling /api/runs/{run_id}/input/{filename} with valid inputs returns Status 200", { res <- httr::GET( - "http://localhost:8000/api/runs/99000000282/input/sipnet.in", + "http://pecan.localhost/api/runs/99000000282/input/sipnet.in", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 200) @@ -60,7 
+60,7 @@ test_that("Calling /api/runs/{run_id}/input/{filename} with valid inputs returns test_that("Calling /api/runs/{run_id}/input/{filename} with valid inputs returns Status 200", { res <- httr::GET( - "http://localhost:8000/api/runs/1000000000/input/randomfile", + "http://pecan.localhost/api/runs/1000000000/input/randomfile", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 404) @@ -68,7 +68,7 @@ test_that("Calling /api/runs/{run_id}/input/{filename} with valid inputs returns test_that("Calling /api/runs/{run_id}/output/{filename} with valid inputs returns Status 200", { res <- httr::GET( - "http://localhost:8000/api/runs/99000000282/output/2002.nc", + "http://pecan.localhost/api/runs/99000000282/output/2002.nc", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 200) @@ -76,7 +76,7 @@ test_that("Calling /api/runs/{run_id}/output/{filename} with valid inputs return test_that("Calling /api/runs/{run_id}/output/{filename} with valid inputs returns Status 200", { res <- httr::GET( - "http://localhost:8000/api/runs/1000000000/output/randomfile", + "http://pecan.localhost/api/runs/1000000000/output/randomfile", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 404) diff --git a/apps/api/tests/test.sites.R b/apps/api/tests/test.sites.R index a636390ab28..7c75eb3ca6a 100644 --- a/apps/api/tests/test.sites.R +++ b/apps/api/tests/test.sites.R @@ -2,7 +2,7 @@ context("Testing all sites endpoints") test_that("Calling /api/sites/ returns Status 200", { res <- httr::GET( - "http://localhost:8000/api/sites/?sitename=washington", + "http://pecan.localhost/api/sites/?sitename=washington", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 200) @@ -10,7 +10,7 @@ test_that("Calling /api/sites/ returns Status 200", { test_that("Calling /api/sites/ with invalid parameters returns Status 404", { res <- httr::GET( - "http://localhost:8000/api/sites/?sitename=random", + "http://pecan.localhost/api/sites/?sitename=random", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 404) @@ -18,7 +18,7 @@ test_that("Calling /api/sites/ with invalid parameters returns Status 404", { test_that("Calling /api/sites/{site_id} returns Status 200", { res <- httr::GET( - "http://localhost:8000/api/sites/676", + "http://pecan.localhost/api/sites/676", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 200) @@ -26,7 +26,7 @@ test_that("Calling /api/sites/{site_id} returns Status 200", { test_that("Calling /api/sites/{site_id} with invalid parameters returns Status 404", { res <- httr::GET( - "http://localhost:8000/api/sites/0", + "http://pecan.localhost/api/sites/0", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 404) diff --git a/apps/api/tests/test.workflows.R b/apps/api/tests/test.workflows.R index fb650bbf732..c17f16b598e 100644 --- a/apps/api/tests/test.workflows.R +++ b/apps/api/tests/test.workflows.R @@ -2,7 +2,7 @@ context("Testing all workflows endpoints") test_that("Calling /api/workflows/ with valid parameters returns Status 200", { res <- httr::GET( - "http://localhost:8000/api/workflows/?model_id=1000000022&site_id=676", + "http://pecan.localhost/api/workflows/?model_id=1000000022&site_id=676", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 200) @@ -12,7 +12,7 @@ test_that("Calling /api/workflows/ with valid parameters returns Status 200", { test_that("Calling /api/workflows/{id} with valid workflow id returns Status 200", { res <- httr::GET( - 
"http://localhost:8000/api/workflows/1000009172", + "http://pecan.localhost/api/workflows/1000009172", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 200) @@ -20,7 +20,7 @@ test_that("Calling /api/workflows/{id} with valid workflow id returns Status 200 test_that("Calling /api/workflows/ with invalid parameters returns Status 404", { res <- httr::GET( - "http://localhost:8000/api/workflows/?model_id=1000000000&site_id=1000000000", + "http://pecan.localhost/api/workflows/?model_id=1000000000&site_id=1000000000", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 404) @@ -30,7 +30,7 @@ test_that("Calling /api/workflows/ with invalid parameters returns Status 404", test_that("Calling /api/workflows/{id} with invalid workflow id returns Status 404", { res <- httr::GET( - "http://localhost:8000/api/workflows/1000000000", + "http://pecan.localhost/api/workflows/1000000000", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 404) @@ -39,7 +39,7 @@ test_that("Calling /api/workflows/{id} with invalid workflow id returns Status 4 test_that("Submitting XML workflow to /api/workflows/ returns Status 201", { xml_string <- paste0(xml2::read_xml("test_workflows/api.sipnet.xml")) res <- httr::POST( - "http://localhost:8000/api/workflows/", + "http://pecan.localhost/api/workflows/", httr::authenticate("carya", "illinois"), httr::content_type("application/xml"), body = xml_string @@ -51,7 +51,7 @@ test_that("Submitting JSON workflow to /api/workflows/ returns Status 201", { Sys.sleep(2) json_workflow <- jsonlite::read_json("test_workflows/api.sipnet.json") res <- httr::POST( - "http://localhost:8000/api/workflows/", + "http://pecan.localhost/api/workflows/", httr::authenticate("carya", "illinois"), body = json_workflow, encode='json' @@ -61,7 +61,7 @@ test_that("Submitting JSON workflow to /api/workflows/ returns Status 201", { test_that("Calling /api/workflows/{id}/status with valid workflow id returns Status 200", { res <- httr::GET( - paste0("http://localhost:8000/api/workflows/", 99000000031, "/status"), + paste0("http://pecan.localhost/api/workflows/", 99000000031, "/status"), httr::authenticate("carya", "illinois") ) expect_equal(res$status, 200) @@ -69,7 +69,7 @@ test_that("Calling /api/workflows/{id}/status with valid workflow id returns Sta test_that("Calling /api/workflows/{id}/status with invalid parameters returns Status 404", { res <- httr::GET( - "http://localhost:8000/api/workflows/0/status", + "http://pecan.localhost/api/workflows/0/status", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 404) @@ -77,7 +77,7 @@ test_that("Calling /api/workflows/{id}/status with invalid parameters returns St test_that("Calling /api/workflows/{id}/file/{filename} with valid parameters returns Status 200", { res <- httr::GET( - paste0("http://localhost:8000/api/workflows/", 99000000031, "/file/", "pecan.CONFIGS.xml"), + paste0("http://pecan.localhost/api/workflows/", 99000000031, "/file/", "pecan.CONFIGS.xml"), httr::authenticate("carya", "illinois") ) expect_equal(res$status, 200) @@ -85,7 +85,7 @@ test_that("Calling /api/workflows/{id}/file/{filename} with valid parameters ret test_that("Calling /api/workflows/{id}/file/{filename} with invalid parameters returns Status 404", { res <- httr::GET( - "http://localhost:8000/api/workflows/0/file/randomfile.txt", + "http://pecan.localhost/api/workflows/0/file/randomfile.txt", httr::authenticate("carya", "illinois") ) expect_equal(res$status, 404) diff --git a/base/all/DESCRIPTION 
b/base/all/DESCRIPTION index ed96934ee9a..2cad68114e2 100644 --- a/base/all/DESCRIPTION +++ b/base/all/DESCRIPTION @@ -1,9 +1,7 @@ Package: PEcAn.all Type: Package -Title: PEcAn functions used for ecological forecasts and - reanalysis -Version: 1.7.2.9000 -Date: 2021-10-04 +Title: PEcAn Functions Used for Ecological Forecasts and Reanalysis +Version: 1.8.0.9000 Authors@R: c(person("Mike", "Dietze", role = c("aut"), email = "dietze@bu.edu"), person("David", "LeBauer", role = c("aut", "cre"), @@ -64,6 +62,7 @@ Depends: Imports: utils Suggests: + mockery, PEcAn.ED2, PEcAn.SIPNET, PEcAn.BIOCRO, @@ -71,10 +70,11 @@ Suggests: PEcAn.LINKAGES, PEcAn.allometry, PEcAn.photosynthesis, + sessioninfo, testthat License: BSD_3_clause + file LICENSE Copyright: Authors LazyData: true Encoding: UTF-8 Roxygen: list(markdown = TRUE) -RoxygenNote: 7.2.3 +RoxygenNote: 7.3.2 diff --git a/base/all/LICENSE b/base/all/LICENSE index 9e38c2dc685..09ef35a60b4 100644 --- a/base/all/LICENSE +++ b/base/all/LICENSE @@ -1,29 +1,3 @@ -University of Illinois/NCSA Open Source License - -Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal with the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -- Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. -- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. -- Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - +YEAR: 2024 +COPYRIGHT HOLDER: PEcAn Project +ORGANIZATION: PEcAn Project, authors affiliations diff --git a/base/all/NAMESPACE b/base/all/NAMESPACE index a7e08575d38..150394015c9 100644 --- a/base/all/NAMESPACE +++ b/base/all/NAMESPACE @@ -1,3 +1,4 @@ # Generated by roxygen2: do not edit by hand +S3method(print,pecan_version_report) export(pecan_version) diff --git a/base/all/NEWS.md b/base/all/NEWS.md index 04fcb3fa32e..1c4ff017520 100644 --- a/base/all/NEWS.md +++ b/base/all/NEWS.md @@ -1,4 +1,13 @@ -# PEcAn.all 1.7.2.9000 +# PEcAn.all 1.8.0.9000 + +## License change +* PEcAn.all is now distributed under the BSD three-clause license instead of the NCSA Open Source license. + +## Changed +* `pecan_version()` now reports the Git revision (if known) for each package, + and prints its results more compactly for easier reading. 
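For context on the `pecan_version()` behavior described in this NEWS entry, a usage sketch based on the function signature and tests elsewhere in this diff (output omitted):

```R
library(PEcAn.all)

# Default: compare installed/loaded PEcAn packages against the most
# recent release tag recorded in PEcAn.all::pecan_releases
pecan_version()

# With exact = FALSE (the default), tags are substring-matched, so this
# returns a column for each of v1.5.0 through v1.5.3:
pecan_version("v1.5")

# exact = TRUE restricts the report to a single exactly-matching tag:
pecan_version("v1.3", exact = TRUE)
```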
+ +# PEcAn.all 1.8.0 ## Added diff --git a/base/all/R/pecan_version.R b/base/all/R/pecan_version.R index 51f983f0be9..7d686af6d1f 100644 --- a/base/all/R/pecan_version.R +++ b/base/all/R/pecan_version.R @@ -16,13 +16,16 @@ #' locations in `.libPaths()`, or if you've loaded a new version into your #' current session by loading it from its source directory without installing #' it to the R library. -#' If you see multiple rows unexpectedly, try `find.package(, verbose = TRUE)` to see where each version was found. +#' If you see multiple rows unexpectedly, try +#' `find.package(, verbose = TRUE)` to see where each version was found. #' #' @param version PEcAn release number to use for expected package versions #' @param exact Show only tags that exactly match `version`, #' or all tags that have it as a substring? #' @return data frame with columns for package name, expected version(s), -#' and installed version +#' installed version, and Git hash (if known). +#' If the `sessioninfo` package is installed, an additional column reports +#' where each package was installed from: local, github, CRAN, etc. #' #' @examples #' pecan_version() @@ -35,25 +38,6 @@ #' @export pecan_version <- function(version = max(PEcAn.all::pecan_releases$version), exact = FALSE) { - all_pkgs <- as.data.frame(utils::installed.packages()) - our_pkgs <- all_pkgs[ - grepl("PEcAn", all_pkgs$Package), - c("Package", "Version") - ] - colnames(our_pkgs) <- c("package", "installed") - our_pkgs$installed <- package_version(our_pkgs$installed) - - # Check in currently loaded packages too, - # add rows for any that differ from installed versions - sess <- utils::sessionInfo() - sess <- c(sess$otherPkgs, sess$loadedOnly) - our_loaded <- sess[grepl("PEcAn", names(sess))] - our_loaded <- data.frame( - package = names(our_loaded), - installed = sapply(our_loaded, `[[`, "Version")) - our_loaded$installed <- package_version(our_loaded$installed) - our_pkgs <- merge(our_pkgs, our_loaded, all = TRUE) - if (!exact) { version <- sapply( X = version, @@ -62,11 +46,68 @@ pecan_version <- function(version = max(PEcAn.all::pecan_releases$version), ) version <- unique(unlist(version)) } + cols_to_return <- c("package", version, "installed", "build_hash") + + + if (requireNamespace("sessioninfo", quietly = TRUE)) { + cols_to_return <- c(cols_to_return, "source") - res <- merge(our_pkgs, PEcAn.all::pecan_version_history, all = TRUE) - res <- res[, c("package", version, "installed")] + all_pkgs <- sessioninfo::package_info(pkgs = "installed", dependencies = FALSE) + our_pkgs <- all_pkgs[grepl("PEcAn", all_pkgs$package),] - drop_na_version_rows(res) + # Why do we need this when `pkgs = "installed"` usually shows loaded too? + # Because there are times a package is loaded but not installed + # (e.g. notably during R CMD check) + all_loaded <- sessioninfo::package_info(pkgs = "loaded", dependencies = FALSE) + our_loaded <- all_loaded[grepl("PEcAn", all_loaded$package),] + + # TODO: consider using package_info's callouts of packages where loaded and + # installed versions mismatch -- it's a more elegant version of what we + # were trying for with the "multiple rows for packages with multiple + # versions found" behavior. 
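+  # Note: merging on package, version, and source together means a package
+  # whose loaded version differs from its installed version keeps two rows,
+  # matching the "multiple rows" behavior described in the roxygen docs above.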
+ our_pkgs <- merge( + x = our_pkgs[, c("package", "ondiskversion", "source")], + y = our_loaded[, c("package", "loadedversion", "source")], + by.x = c("package", "ondiskversion", "source"), + by.y = c("package", "loadedversion", "source"), + all = TRUE, + sort = TRUE) + colnames(our_pkgs) <- c("package", "installed", "source") + our_pkgs$installed <- package_version(our_pkgs$installed) + + } else { + all_pkgs <- as.data.frame(utils::installed.packages()) + our_pkgs <- all_pkgs[ + grepl("PEcAn", all_pkgs$Package), + c("Package", "Version") + ] + colnames(our_pkgs) <- c("package", "installed") + our_pkgs$installed <- package_version(our_pkgs$installed) + sess <- utils::sessionInfo() + sess <- c(sess$otherPkgs, sess$loadedOnly) + our_loaded <- sess[grepl("PEcAn", names(sess))] + our_loaded <- data.frame( + package = names(our_loaded), + installed = sapply(our_loaded, `[[`, "Version")) + our_loaded$installed <- package_version(our_loaded$installed) + our_pkgs <- merge(our_pkgs, our_loaded, all = TRUE, sort = TRUE) + our_pkgs <- our_pkgs[!duplicated(our_pkgs),] + } + + want_hash <- !is.na(our_pkgs$installed) + our_pkgs$build_hash[want_hash] <- sapply( + our_pkgs$package[want_hash], + get_buildhash) + + res <- merge( + x = our_pkgs, + y = PEcAn.all::pecan_version_history, + all = TRUE) + res <- drop_na_version_rows(res[, cols_to_return]) + rownames(res) <- res$package + class(res) <- c("pecan_version_report", class(res)) + + res } # Remove rows where all versions are missing @@ -75,3 +116,46 @@ drop_na_version_rows <- function(df) { stopifnot(colnames(df)[[1]] == "package") df[rowSums(is.na(df[, -1])) < ncol(df[, -1]), ] } + + +# Look up git revision, if recorded, from an installed PEcAn package +get_buildhash <- function(pkg) { + # Set if pkg was installed from r-universe or via install_github() + desc_sha <- utils::packageDescription(pkg, fields = "RemoteSha") + if (!is.na(desc_sha)) { + return(substr(desc_sha, 1, 10)) + } + # Set if PECAN_GIT_REV was set during install (includes `make install`) + get0(".build_hash", envir = asNamespace(pkg), ifnotfound = NA_character_) +} + + +# print method for version +# (Just to help it display more compactly) +#' @export +print.pecan_version_report <- function(x, ...) { + + dots <- list(...) 
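+  # Compact-display defaults; the caller can still override both via `...`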
+ if (is.null(dots$row.names)) { dots$row.names <- FALSE } + if (is.null(dots$right)) { dots$right <- FALSE } + + xx <- as.data.frame(x) + # only print hash for dev versions + # (typically x.y.z.9000, but we'll use anything with a 4th version component) + skip_hash <- is.na(xx$installed[,4]) | is.na(xx$build_hash) + xx$build_hash[skip_hash] <- "" + xx$build_hash <- sub(".{4}\\+mod$", "+mod", xx$build_hash) + xx$installed <- paste0( + xx$installed, + sub("(.+)", " (\\1)", xx$build_hash)) + xx$build_hash <- NULL + if (!is.null(xx$source)) { + xx$source <- paste0( + strtrim(xx$source, 17), + ifelse(nchar(xx$source, type="width") <= 17, "", "...")) + } + dots$x <- xx + do.call("print", dots) + + invisible(x) +} diff --git a/base/all/R/version.R b/base/all/R/version.R new file mode 100644 index 00000000000..0e58d885272 --- /dev/null +++ b/base/all/R/version.R @@ -0,0 +1,3 @@ +# Set at package install time, used by pecan.all::pecan_version() +# to identify development versions of packages +.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown") diff --git a/base/all/data-raw/record_versions.R b/base/all/data-raw/record_versions.R index 655f9d093b5..0253c7cc380 100755 --- a/base/all/data-raw/record_versions.R +++ b/base/all/data-raw/record_versions.R @@ -1,5 +1,7 @@ #!/usr/bin/env Rscript +# SUPERSEDED, DO NOT USE -- instead just edit the CSVs in `data/`. + # Adds the current versions of all PEcAn packages to `pecan_version_history`, # and adds the specified tag and version number to `pecan_releases`. diff --git a/base/all/data/pecan_releases.R b/base/all/data/pecan_releases.R new file mode 100644 index 00000000000..b763821f46a --- /dev/null +++ b/base/all/data/pecan_releases.R @@ -0,0 +1,8 @@ + +pecan_releases <- utils::read.csv( + "pecan_releases.csv", + colClasses = c(tag = "character", date = "Date", version = "character")) + +pecan_releases$version <- package_version(pecan_releases$version) + +rownames(pecan_releases) <- pecan_releases$tag diff --git a/base/all/data/pecan_releases.csv b/base/all/data/pecan_releases.csv new file mode 100644 index 00000000000..dd81a1ce025 --- /dev/null +++ b/base/all/data/pecan_releases.csv @@ -0,0 +1,34 @@ +"tag","date","version" +"v1.0",2011-11-03,1.0 +"v1.0.1",2011-11-14,1.0.1 +"v1.1",2012-05-24,1.1 +"v1.2.5",2012-07-22,1.2.5 +"v1.2.6",2012-08-20,1.2.6 +"v1.3",2013-04-21,1.3 +"v1.3.2",2013-07-17,1.3.2 +"v1.3.4",2014-03-25,1.3.4 +"v1.3.5",2014-05-16,1.3.5 +"v1.3.6",2014-06-28,1.3.6 +"1.3.7",2014-09-09,1.3.7 +"v1.3.6.1",2014-09-27,1.3.6.1 +"v1.4.0",2014-12-10,1.4.0 +"v1.4.1",2015-01-27,1.4.1 +"v1.4.2",2015-05-08,1.4.2 +"v.1.4.3",2015-07-22,1.4.3 +"v1.4.4",2015-11-11,1.4.4 +"v1.4.5",2016-03-03,1.4.5 +"1.4.6",2016-05-06,1.4.6 +"1.4.7",2016-07-14,1.4.7 +"v1.4.8",2016-08-12,1.4.8 +"v1.4.9",2016-12-10,1.4.9 +"v1.4.10",2017-03-28,1.4.10 +"v1.4.10.1",2017-04-21,1.4.10.1 +"v1.5.0",2017-07-13,1.5.0 +"v1.5.1",2017-10-06,1.5.1 +"v1.5.2",2017-12-08,1.5.2 +"v1.5.3",2018-05-17,1.5.3 +"v1.6.0",2018-10-02,1.6.0 +"v1.7.0",2018-12-10,1.7.0 +"v1.7.1",2019-09-20,1.7.1 +"v1.7.2",2021-10-08,1.7.2 +"v1.8.0",2024-07-12,1.8.0 diff --git a/base/all/data/pecan_releases.rda b/base/all/data/pecan_releases.rda deleted file mode 100644 index 366a8b3362c..00000000000 Binary files a/base/all/data/pecan_releases.rda and /dev/null differ diff --git a/base/all/data/pecan_version_history.R b/base/all/data/pecan_version_history.R new file mode 100644 index 00000000000..efff2cdf1ac --- /dev/null +++ b/base/all/data/pecan_version_history.R @@ -0,0 +1,34 @@ + +# Read and format a list of pecan 
versions + +# The local() wrapper is to avoid adding objects to the package data: +# Any extra vars defined at the top level of this file would be loaded +# into the global environment by `data("pecan_version_history")` + +pecan_version_history <- local({ + pvh <- utils::read.csv( + "pecan_version_history.csv", + colClasses = "character", + check.names = FALSE) + + # We'd like to parse strictly to catch invalid versions (probably typos). + # But we _need_ to allow NAs... and in R < 4.4, package_version did not + # accept NAs unless strict=FALSE. + strict <- TRUE + na_version <- try( + package_version(NA_character_, strict = strict), + silent = TRUE) + if (inherits(na_version, "try-error")) { + strict <- FALSE + } + + for (col in colnames(pvh)) { + if (col != "package") { + pvh[[col]] <- package_version( + pvh[[col]], + strict = strict) + } + } + + pvh +}) diff --git a/base/all/data/pecan_version_history.csv b/base/all/data/pecan_version_history.csv new file mode 100644 index 00000000000..d9cdb3b5276 --- /dev/null +++ b/base/all/data/pecan_version_history.csv @@ -0,0 +1,54 @@ +"package","v1.0","v1.0.1","v1.1","v1.2.5","v1.2.6","v1.3","v1.3.2","v1.3.4","v1.3.5","v1.3.6","v1.3.6.1","1.3.7","v1.4.0","v1.4.1","v1.4.2","v.1.4.3","v1.4.4","v1.4.5","1.4.6","1.4.7","v1.4.8","v1.4.9","v1.4.10","v1.4.10.1","v1.5.0","v1.5.1","v1.5.2","v1.5.3","v1.6.0","v1.7.0","v1.7.1","v1.7.2","v1.8.0" +"PEcAn",NA,NA,NA,1.2.5,1.2.6,1.2.6,1.3.1,1.3.3,1.3.3,1.3.3,1.3.7,1.3.3,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA +"PECAn",1.0,1.0,1.1,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA +"PEcAn.all",NA,NA,NA,1.2.5,1.2.6,1.2.6,1.3.1,1.3.3,1.3.3,1.3.3,1.3.7,1.3.3,1.4.0,1.4.1,1.4.2,1.4.3,1.4.4,1.4.5,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.8.0 +"PEcAn.allometry",NA,NA,NA,NA,NA,NA,NA,1.3.3,1.3.3,1.3.3,1.3.7,1.3.3,1.4.0,1.4.1,1.4.2,1.4.3,1.4.4,1.4.5,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.7.3 +"PEcAn.assim.batch",NA,NA,NA,NA,1.2.6,1.2.6,1.3.1,1.3.3,1.3.3,1.3.3,1.3.7,1.3.3,1.4.0,1.4.1,1.4.2,1.4.3,1.4.4,1.4.5,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.8.0 +"PEcAn.assim.sequential",NA,NA,NA,NA,1.2.6,1.2.6,1.3.1,1.3.3,1.3.3,1.3.3,1.3.7,1.3.3,1.4.0,1.4.1,1.4.2,1.4.3,1.4.4,1.4.5,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,NA +"PEcAn.BASGRA",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.7.2,1.8.0 +"PEcAn.benchmark",NA,NA,NA,NA,NA,NA,NA,NA,1.3.3,1.3.3,1.3.7,1.3.3,1.4.0,1.4.1,1.4.2,1.4.3,1.4.4,1.4.5,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.7.3 +"PEcAn.BIOCRO",NA,NA,NA,NA,NA,1.2,1.3.1,1.3.3,1.3.3,1.3.3,1.3.7,1.3.3,1.4.0,1.4.1,1.4.2,1.4.3,1.4.4,1.4.5,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.7.3 +"PEcAn.CABLE",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.7.3 +"PEcAn.CLM45",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.4.3,1.4.4,1.4.5,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.7.3 +"PEcAn.common",NA,NA,NA,1.2.5,1.2.6,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA +"PEcAn.dalec",NA,NA,NA,NA,NA,NA,NA,1.3.1,1.3.1,1.3.1,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
+"PEcAn.DALEC",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.3.7,1.3.1,1.4.0,1.4.1,1.4.2,1.4.3,1.4.4,1.4.5,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.7.3 +"PEcAn.data.atmosphere",NA,NA,NA,NA,NA,1.2.6,1.3.1,1.3.3,1.3.3,1.3.3,1.3.7,1.3.3,1.4.0,1.4.1,1.4.2,1.4.3,1.4.4,1.4.5,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.8.0 +"PEcAn.data.hydrology",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.3.7,1.3.3,1.4.0,1.4.1,1.4.2,1.4.3,1.4.4,1.4.5,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,NA +"PEcAn.data.land",NA,NA,NA,1.2.5,1.2.6,1.2.6,1.3.1,1.3.3,1.3.3,1.3.3,1.3.7,1.3.3,1.4.0,1.4.1,1.4.2,1.4.3,1.4.4,1.4.5,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.8.0 +"PEcAn.data.mining",NA,NA,NA,NA,NA,NA,NA,1.3.3,1.3.3,1.3.3,1.3.7,1.3.3,1.4.0,1.4.1,1.4.2,1.4.3,1.4.4,1.4.5,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.7.3 +"PEcAn.data.remote",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.3.7,1.3.3,1.4.0,1.4.1,1.4.2,1.4.3,1.4.4,1.4.5,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.8.0 +"PEcAn.DB",NA,NA,NA,1.2.5,1.2.6,1.2.6,1.3.1,1.3.3,1.3.3,1.3.3,1.3.7,1.3.3,1.4.0,1.4.1,1.4.2,1.4.3,1.4.4,1.4.5,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.8.0 +"PEcAn.dvmdostem",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.7.3 +"PEcAn.ED",NA,NA,NA,1.2.5,1.2.6,1.2.6,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA +"PEcAn.ED2",NA,NA,NA,NA,NA,NA,1.3.1,1.3.3,1.3.3,1.3.3,1.3.7,1.3.3,1.4.0,1.4.1,1.4.2,1.4.3,1.4.4,1.4.5,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.8.0 +"PEcAn.emulator",NA,NA,NA,NA,NA,1.0,1.3.1,1.3.3,1.3.3,1.3.3,1.3.7,1.3.3,1.4.0,1.4.1,1.4.2,1.4.3,1.4.4,1.4.5,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.8.0 +"PEcAn.FATES",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.7.3 +"PEcAn.GDAY",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.7.3 +"PEcAn.IBIS",NA,NA,NA,1.2.5,1.2.6,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA +"PEcAn.JULES",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.7.3 +"PEcAn.LDNDC",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.0.0 +"PEcAn.LINKAGES",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.4.1,1.4.2,1.4.3,1.4.4,1.4.5,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.7.3 +"PEcAn.logger",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.8.0,1.8.2 +"PEcAn.LPJGUESS",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.7.3 +"PEcAn.MA",NA,NA,NA,1.2.5,1.2.6,1.2.6,1.3.1,1.3.3,1.3.3,1.3.3,1.3.7,1.3.3,1.4.0,1.4.1,1.4.2,1.4.3,1.4.4,1.4.5,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.7.3 
+"PEcAn.MAAT",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.7.3 +"PEcAn.MAESPA",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.7.3 +"PEcAn.ModelName",NA,NA,NA,NA,NA,1.2,1.3.1,1.3.3,1.3.3,1.3.3,1.3.7,1.3.3,1.4.0,1.4.1,1.4.2,1.4.3,1.4.4,1.4.5,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.8.0 +"PEcAn.photosynthesis",NA,NA,NA,NA,NA,1.2.6,1.3.1,1.3.3,1.3.3,1.3.6,1.3.7,1.3.6,1.4.0,1.4.1,1.4.2,1.4.3,1.4.4,1.4.5,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.7.3 +"PEcAn.PRELES",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.4.4,1.4.5,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.7.3 +"PEcAn.priors",NA,NA,NA,NA,NA,1.2.6,1.3.1,1.3.3,1.3.3,1.3.3,1.3.7,1.3.3,1.4.0,1.4.1,1.4.2,1.4.3,1.4.4,1.4.5,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.7.3 +"PEcAn.qaqc",NA,NA,NA,NA,NA,1.0,1.3.1,1.3.3,1.3.3,1.3.3,1.3.7,1.3.3,1.4.0,1.4.1,1.4.2,1.4.3,1.4.4,1.4.5,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.7.3 +"PEcAn.remote",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.8.0 +"PEcAn.rtm",NA,NA,NA,1.2.5,1.2.6,1.2.6,1.3.1,1.3.3,1.3.3,1.3.3,1.3.7,1.3.3,1.4.0,1.4.1,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA +"PEcAn.settings",NA,NA,NA,NA,NA,NA,1.3.1,1.3.3,1.3.3,1.3.3,1.3.7,1.3.3,1.4.0,1.4.1,1.4.2,1.4.3,1.4.4,1.4.5,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.8.0 +"PEcAn.SIBCASA",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.0.1 +"PEcAn.SIPNET",NA,NA,NA,1.2.5,1.2.6,1.2.6,1.3.1,1.3.3,1.3.3,1.3.3,1.3.7,1.3.3,1.4.0,1.4.1,1.4.2,1.4.3,1.4.4,1.4.5,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.8.0 +"PEcAn.STICS",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.7.2,1.8.0 +"PEcAn.uncertainty",NA,NA,NA,1.2.5,1.2.6,1.2.6,1.3.1,1.3.3,1.3.3,1.3.3,1.3.7,1.3.3,1.4.0,1.4.1,1.4.2,1.4.3,1.4.4,1.4.5,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.8.0 +"PEcAn.utils",NA,NA,NA,1.2.5,1.2.6,1.2.6,1.3.1,1.3.3,1.3.3,1.3.3,1.3.7,1.3.3,1.4.0,1.4.1,1.4.2,1.4.3,1.4.4,1.4.5,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.8.0 +"PEcAn.visualization",NA,NA,NA,1.2.5,1.2.6,1.2.6,1.3.1,1.3.3,1.3.3,1.3.3,1.3.7,1.3.3,1.4.0,1.4.1,1.4.2,1.4.3,1.4.4,1.4.5,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.8.0 +"PEcAn.workflow",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.8.0 +"pecanapi",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.7.0,1.7.1,NA,NA +"PEcAnAssimSequential",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.8.0 +"PEcAnRTM",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.4.2,1.4.3,1.4.4,1.4.5,1.4.6,1.4.7,1.4.8,1.4.9,1.4.10,1.4.10.1,1.5.0,1.5.1,1.5.2,1.5.3,1.6.0,1.7.0,1.7.1,1.7.2,1.7.3 diff --git a/base/all/data/pecan_version_history.rda b/base/all/data/pecan_version_history.rda deleted file mode 100644 
index bccc9674413..00000000000 Binary files a/base/all/data/pecan_version_history.rda and /dev/null differ diff --git a/base/all/man/pecan_version.Rd b/base/all/man/pecan_version.Rd index e2ded17e8b1..1f3c9578b59 100644 --- a/base/all/man/pecan_version.Rd +++ b/base/all/man/pecan_version.Rd @@ -14,7 +14,9 @@ or all tags that have it as a substring?} } \value{ data frame with columns for package name, expected version(s), -and installed version +installed version, and Git hash (if known). +If the \code{sessioninfo} package is installed, an additional column reports +where each package was installed from: local, github, CRAN, etc. } \description{ Reports the currently installed or loaded version(s) of each PEcAn package, @@ -34,7 +36,8 @@ This can occur if you have installed different versions to different locations in \code{.libPaths()}, or if you've loaded a new version into your current session by loading it from its source directory without installing it to the R library. -If you see multiple rows unexpectedly, try \verb{find.package(, verbose = TRUE)} to see where each version was found. +If you see multiple rows unexpectedly, try +\verb{find.package(, verbose = TRUE)} to see where each version was found. } \examples{ pecan_version() diff --git a/base/all/tests/Rcheck_reference.log b/base/all/tests/Rcheck_reference.log index c03d9bb902b..30cecf2ff34 100644 --- a/base/all/tests/Rcheck_reference.log +++ b/base/all/tests/Rcheck_reference.log @@ -14,38 +14,6 @@ New submission Version contains large components (1.7.2.9000) -License components with restrictions and base license permitting such: - BSD_3_clause + file LICENSE -File 'LICENSE': - University of Illinois/NCSA Open Source License - - Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal with the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. - - Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR - ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. 
- Strong dependencies not in mainstream repositories: PEcAn.DB, PEcAn.settings, PEcAn.MA, PEcAn.logger, PEcAn.utils, PEcAn.uncertainty, PEcAn.data.atmosphere, PEcAn.data.land, @@ -54,13 +22,6 @@ Strong dependencies not in mainstream repositories: Suggests or Enhances not in mainstream repositories: PEcAn.ED2, PEcAn.SIPNET, PEcAn.BIOCRO, PEcAn.DALEC, PEcAn.LINKAGES, PEcAn.allometry, PEcAn.photosynthesis - -The Title field should be in title case. Current version is: -‘PEcAn functions used for ecological forecasts and reanalysis’ -In title case that is: -‘PEcAn Functions Used for Ecological Forecasts and Reanalysis’ - -The Date field is over a month old. * checking package namespace information ... OK * checking package dependencies ... NOTE Depends: includes the non-default packages: diff --git a/base/all/tests/testthat.R b/base/all/tests/testthat.R index c2756b18b14..6a5740398ca 100644 --- a/base/all/tests/testthat.R +++ b/base/all/tests/testthat.R @@ -1,11 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- library(testthat) library(PEcAn.utils) diff --git a/base/all/tests/testthat/test-pecan_version.R b/base/all/tests/testthat/test-pecan_version.R index 2a9100d4d7a..b66172709a2 100644 --- a/base/all/tests/testthat/test-pecan_version.R +++ b/base/all/tests/testthat/test-pecan_version.R @@ -26,7 +26,7 @@ test_that("pecan_version", { # tags substring matched only when exact = FALSE expect_named( pecan_version("v1.5"), - c("package", paste0("v1.5.", 0:3), "installed") + c("package", paste0("v1.5.", 0:3), "installed", "build_hash", "source") ) expect_error( pecan_version("v1.5", exact = TRUE), @@ -34,14 +34,14 @@ test_that("pecan_version", { ) expect_named( pecan_version("v1.3", exact = TRUE), - c("package", "v1.3", "installed") + c("package", "v1.3", "installed", "build_hash", "source") ) # returns current release if no args given noargs <- pecan_version() expected_tag <- tail(PEcAn.all::pecan_releases, 1)$tag - expect_length(noargs, 3) - expect_named(noargs, c("package", expected_tag, "installed")) + expect_length(noargs, 5) + expect_named(noargs, c("package", expected_tag, "installed", "build_hash", "source")) # Why the `any()`s below? # Because R CMD check runs tests with local test dir added to .libPaths, @@ -67,3 +67,61 @@ test_that("pecan_version", { ) ) }) + + +test_that("pecan_version without sessioninfo", { + + with_sessinfo <- pecan_version() + + # make pecan_version think the sessioninfo package is unavailable + mockery::stub(pecan_version, 'requireNamespace', FALSE) + without_sessinfo <- pecan_version() + + expect_length(with_sessinfo, 5) + expect_length(without_sessinfo, 4) + expect_equal( + with_sessinfo[, colnames(with_sessinfo) != "source"], + without_sessinfo) +}) + +# TODO: Would be nice to add a check here that will notice if the list of PEcAn +# releases falls out of date, but it's not clear what other source of truth +# to consult to determine that. 
+# +# The approach that failed just before I wrote this note: +# No, the version of PEcAn.all (1.8.1.9000 today) is not reliably in sync with +# the PEcAn version last tagged as a release (1.7.2 today). + + +test_that("printing", { + ver <- structure( + data.frame( + package = "PEcAnFake", + v0.0 = package_version("1.2.3"), + installed = package_version("1.2.3.9000"), + build_hash = "01234567ab", + source = "13 characters"), + class = c("pecan_version_report", "data.frame") + ) + + long_ver <- ver + long_ver$build_hash = "01234567ab+mod" + long_ver$source = "twenty-two characters" + + # hash truncated to fit "+mod" if present + expect_output(print(ver), "01234567ab", fixed = TRUE) + expect_output(print(long_ver), "012345+mod", fixed = TRUE) + + # source truncated to total of 20 chars + expect_output(print(ver), "13 characters$") + expect_output(print(long_ver), "twenty-two charac...", fixed = TRUE) + + # source truncation works on width not glyph count + long_ver$source <- gsub("tw", "\U{1F197}\U{1F192}", long_ver$source) + expect_output(print(long_ver), "\U{1F192}o ch...", fixed = TRUE) + + # dots passed on + expect_output(print(ver), "\n PEcAnFake") + expect_output(print(ver, row.names = TRUE), "\n1 PEcAnFake", fixed = TRUE) + expect_output(print(ver, quote = TRUE), "\n \"PEcAnFake\"", fixed = TRUE) +}) diff --git a/base/all/tests/testthat/test.workflow.R b/base/all/tests/testthat/test.workflow.R index ee055c45c6a..85efe3437c8 100644 --- a/base/all/tests/testthat/test.workflow.R +++ b/base/all/tests/testthat/test.workflow.R @@ -1,12 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - # TODO This is an integration test (#1125) #context("tests of overall workflow") diff --git a/base/db/DESCRIPTION b/base/db/DESCRIPTION index 0e302137980..f88a16982e4 100644 --- a/base/db/DESCRIPTION +++ b/base/db/DESCRIPTION @@ -1,8 +1,7 @@ Package: PEcAn.DB Type: Package Title: PEcAn Functions Used for Ecological Forecasts and Reanalysis -Version: 1.7.2.9000 -Date: 2021-10-04 +Version: 1.8.0.9000 Authors@R: c(person("David", "LeBauer", role = c("aut", "cre"), email = "dlebauer@email.arizona.edu"), person("Mike", "Dietze", role = c("aut"), @@ -24,7 +23,7 @@ Authors@R: c(person("David", "LeBauer", role = c("aut", "cre"), person("Chris", "Black", role = c("aut"), email = "chris@ckblack.org"), person("Liam", "Burke", role = c("aut"), - email = "liam.burke24@gmail.com>"), + email = "liam.burke24@gmail.com"), person("Ryan", "Kelly", role = c("aut"), email = "rykelly@bu.edu"), person("Dan", "Wang", role = c("aut")), @@ -41,10 +40,9 @@ Description: The Predictive Ecosystem Carbon Analyzer (PEcAn) is a scientific streamline the interaction between data and models, and to improve the efficacy of scientific investigation. 
Imports: - curl, DBI, - dbplyr, - dplyr, + dbplyr (>= 2.4.0), + dplyr (>= 1.1.2), fs, glue, lubridate, @@ -60,31 +58,24 @@ Imports: tibble, tidyr, - units, - XML + units Suggests: bit64, data.table, here, - knitr, + knitr (>= 1.42), + mockery (>= 0.4.3), RPostgreSQL, RPostgres, RSQLite, rcrossref, rmarkdown (>= 2.19), testthat (>= 2.0.0), - tidyverse -X-Comment-Remotes: - Installing markdown from GitHub because as of 2023-02-05, this is the - easiest way to get version >= 2.19 onto Docker images that use older - Rstudio Package Manager snapshots. - When building on a system that finds a new enough version on CRAN, - OK to remove the Remotes line and this comment. -Remotes: - github::rstudio/rmarkdown@v2.20 + tidyverse, + withr License: BSD_3_clause + file LICENSE -VignetteBuilder: knitr +VignetteBuilder: knitr, rmarkdown Copyright: Authors LazyLoad: yes LazyData: FALSE Encoding: UTF-8 -RoxygenNote: 7.2.3 +RoxygenNote: 7.3.2 diff --git a/base/db/LICENSE b/base/db/LICENSE index 9e38c2dc685..09ef35a60b4 100644 --- a/base/db/LICENSE +++ b/base/db/LICENSE @@ -1,29 +1,3 @@ -University of Illinois/NCSA Open Source License - -Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal with the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -- Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. -- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. -- Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - +YEAR: 2024 +COPYRIGHT HOLDER: PEcAn Project +ORGANIZATION: PEcAn Project, authors affiliations diff --git a/base/db/NEWS.md b/base/db/NEWS.md index 7fb4ff753a2..5d008066052 100644 --- a/base/db/NEWS.md +++ b/base/db/NEWS.md @@ -1,4 +1,10 @@ -# PEcAn.DB 1.7.2.9000 +# PEcAn.DB 1.8.0.9000 + +## License change +* PEcAn.DB is now distributed under the BSD three-clause license instead of the NCSA Open Source license. + + +# PEcAn.DB 1.8.0 ## Added @@ -8,6 +14,7 @@ * New function `convert_input`, used to convert between formats while reusing existing files where possible. It previously lived in package `PEcAn.utils`, but was moved here to simplify dependencies. (#3026; @nanu1605) +* `get.trait.data` gains new argument `write` (with default FALSE), passed on to `get.trait.data.pft` (@Aariq, #3065).
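To make that changelog entry concrete, here is a minimal sketch of the new argument in use. Only `write` and its default are confirmed by the entry above; the surrounding argument names follow the current `get.trait.data()` signature and should be read as assumptions:

```r
# Sketch: refresh trait data for the configured PFTs, without writing the
# resulting posterior records back to the database. `write` is simply
# passed through to get.trait.data.pft() for each PFT.
settings <- PEcAn.settings::read.settings("pecan.xml") # hypothetical settings file
pfts <- PEcAn.DB::get.trait.data(
  pfts = settings$pfts,
  modeltype = settings$model$type,
  dbfiles = settings$database$dbfiles,
  database = settings$database$bety,
  forceupdate = FALSE,
  write = FALSE # new argument; default FALSE
)
```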
# PEcAn.DB 1.7.2 diff --git a/base/db/R/assign.treatments.R b/base/db/R/assign.treatments.R index 5e22285e837..3779d327676 100644 --- a/base/db/R/assign.treatments.R +++ b/base/db/R/assign.treatments.R @@ -1,19 +1,9 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -##-----------------------------------------------------------------------------# ##' Change treatments to sequential integers ##' ##' Assigns all control treatments the same value, then assigns unique treatments ##' within each site. Each site is required to have a control treatment. ##' The algorithm (incorrectly) assumes that each site has a unique set of experimental -##' treatments. This assumption is required by the data in BETTdb that does not always consistently name treatments or quantity them in the managements table. Also it avoids having the need to estimate treatment by site interactions in the meta analysis model. This model uses data in the control treatment to estimate model parameters so the impact of the assumption is minimal. +##' treatments. This assumption is required because the data in BETYdb do not always consistently name or quantify treatments in the managements table. It also avoids the need to estimate treatment-by-site interactions in the meta-analysis model. The model uses data in the control treatment to estimate model parameters, so the impact of the assumption is minimal. ##' @name assign.treatments ##' @title assign.treatments ##' @param data input data diff --git a/base/db/R/check.lists.R b/base/db/R/check.lists.R index 18e7c8b3840..cc131ca59ed 100644 --- a/base/db/R/check.lists.R +++ b/base/db/R/check.lists.R @@ -1,13 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -##--------------------------------------------------------------------------------------------------# ##' Check two lists. Identical does not work since one can be loaded ##' from the database and the other from a CSV file. ##' diff --git a/base/db/R/convert_input.R b/base/db/R/convert_input.R index 8203fa7244b..1ff74a13014 100644 --- a/base/db/R/convert_input.R +++ b/base/db/R/convert_input.R @@ -45,7 +45,6 @@ ##' @param host Named list identifying the machine where conversion should be performed. ##' Currently only \code{host$name} and \code{host$Rbinary} are used by \code{convert_input}, ##' but the whole list is passed to other functions -##' @param browndog List of information related to browndog conversion. NULL if browndog is not to be used for conversion ##' @param write Logical: Write new file records to the database?
##' @param format.vars Passed on as arguments to \code{fcn} ##' @param overwrite Logical: If a file already exists, create a fresh copy? Passed along to fcn. @@ -77,7 +76,6 @@ convert_input <- fcn, con = con, host, - browndog, write = TRUE, format.vars, overwrite = FALSE, @@ -580,105 +578,7 @@ convert_input <- conversion <- "local.remote" #default - if (!is.null(browndog) && host$name == "localhost") { - # perform conversions with Brown Dog - only works locally right now - - # Determine outputtype using formatname and mimetype of output file Add issue to - # github that extension of formats table to include outputtype Convert to netcdf - # - only using localhost - if (mimetype == "application/x-netcdf") { - outputtype <- "pecan.zip" - } else { - # Convert to model specific format - if (formatname == "ed.met_driver_header_files_format" || formatname == - "ed.met_driver_header files format") { - outputtype <- "ed.zip" - } else if (formatname == "Sipnet.climna") { - outputtype <- "clim" - } else if (formatname == "DALEC meteorology") { - outputtype <- "dalec.dat" - } else if (formatname == "LINKAGES met") { - outputtype <- "linkages.dat" - } else { - PEcAn.logger::logger.severe(paste("Unknown formatname", formatname)) - } - } - - # create curl options - curloptions <- list(followlocation = TRUE) - if (!is.null(browndog$username) && !is.null(browndog$password)) { - curloptions$userpwd = paste( - browndog$username, browndog$password, sep = ":") - curloptions$httpauth = 1L - } - - # check if we can do conversion - h <- curl::new_handle() - curl::handle_setopt(h, .list = curloptions) - out.html <- readLines( - curl::curl( - url = paste0( - "http://dap-dev.ncsa.illinois.edu:8184/inputs/", - browndog$inputtype), - handle = h)) - if (outputtype %in% out.html) { - PEcAn.logger::logger.info( - "Conversion from", browndog$inputtype, - "to", outputtype, - "through Brown Dog") - conversion <- "browndog" - } - } - - if (conversion == "browndog") { - url <- file.path(browndog$url, outputtype) - - # loop over files in localhost and zip to send to Brown Dog - files <- list.files(dbfile$file_path, pattern = dbfile$file_name) - files <- grep(dbfile$file_name, files, value = TRUE) - zipfile <- paste0(dbfile$file_name, ".", browndog$inputtype) - system(paste("cd", dbfile$file_path, "; zip", zipfile, paste(files, collapse = " "))) - zipfile <- file.path(dbfile$file_path, zipfile) - - # check for and create output folder - if (!file.exists(outfolder)) { - dir.create(outfolder, showWarnings = FALSE, recursive = TRUE) - } - - # post zipped file to Brown Dog - h <- curl::new_handle() - curl::handle_setopt(handle = h, .list = curloptions) - curl::handle_setform(handle = h, fileData = curl::form_file(zipfile)) - html <- readLines(curl::curl(url = url, handle = h)) - link <- XML::getHTMLLinks(html) - file.remove(zipfile) - - # download converted file - outfile <- file.path(outfolder, unlist(strsplit(basename(link), "_"))[2]) - PEcAn.utils::download.url(url = link, file = outfile, timeout = 600, .opts = curloptions, retry = TRUE) - - # unzip downloaded file if necessary - if (file.exists(outfile)) { - if (utils::tail(unlist(strsplit(outfile, "[.]")), 1) == "zip") { - fname <- utils::unzip(outfile, list = TRUE)$Name - utils::unzip(outfile, files = fname, exdir = outfolder, overwrite = TRUE) - file.remove(outfile) - } else { - fname <- list.files(outfolder) - } - } - - result <- data.frame( - # contains one row for each file in fname - file = file.path(outfolder, fname), - host = PEcAn.remote::fqdn(), - mimetype = 
mimetype, - formatname = formatname, - startdate = paste(input$start_date, "00:00:00"), - enddate = paste(input$end_date, "23:59:59"), - stringsAsFactors = FALSE) - - } else if (conversion == "local.remote") { + if (conversion == "local.remote") { # perform conversion on local or remote host fcn.args <- input.args diff --git a/base/db/R/covariate.functions.R b/base/db/R/covariate.functions.R index 31c93009ee9..c40e3ca5a05 100644 --- a/base/db/R/covariate.functions.R +++ b/base/db/R/covariate.functions.R @@ -1,12 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - ######################## COVARIATE FUNCTIONS ################################# ##--------------------------------------------------------------------------------------------------# diff --git a/base/db/R/dbfiles.R b/base/db/R/dbfiles.R index 28509daef8f..1cab9e069e7 100644 --- a/base/db/R/dbfiles.R +++ b/base/db/R/dbfiles.R @@ -1,12 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - ##' Function to insert a file into the dbfiles table as an input ##' ##' This will write into the dbfiles, inputs, machines and formats the required @@ -59,9 +50,8 @@ dbfile.input.insert <- function(in.path, in.prefix, siteid, startdate, enddate, # setup parent part of query if specified - if (is.na(parentid)) { - parent <- "" - } else { + parent <- "" + if (!is.na(parentid)) { parent <- paste0(" AND parent_id=", parentid) } @@ -71,7 +61,7 @@ dbfile.input.insert <- function(in.path, in.prefix, siteid, startdate, enddate, "SELECT * FROM inputs WHERE site_id=", siteid, " AND name= '", name, "' AND format_id=", formatid, - parent + parent, ";" ), con = con ) @@ -120,26 +110,26 @@ dbfile.input.insert <- function(in.path, in.prefix, siteid, startdate, enddate, "INSERT INTO inputs ", "(site_id, format_id, name) VALUES (", siteid, ", ", formatid, ", '", name, - "'", ") RETURNING id" + "'", ") RETURNING id;" ) } else if (parent == "" && !is.null(startdate)) { cmd <- paste0( "INSERT INTO inputs ", "(site_id, format_id, start_date, end_date, name) VALUES (", siteid, ", ", formatid, ", '", startdate, "', '", enddate, "','", name, - "') RETURNING id" + "') RETURNING id;" ) } else if (is.null(startdate)) { cmd <- paste0( "INSERT INTO inputs ", "(site_id, format_id, name, parent_id) VALUES (", - siteid, ", ", formatid, ", '", name, "',", parentid, ") RETURNING id" + siteid, ", ", formatid, ", '", name, "',", parentid, ") RETURNING id;" ) } else { cmd <- paste0( "INSERT INTO inputs ", "(site_id, format_id, start_date, end_date, name, parent_id) VALUES (", - siteid, ", ", formatid, ", '", startdate, "', '", enddate, "','", name, "',", parentid, ") RETURNING id" + siteid, ", ", 
formatid, ", '", startdate, "', '", enddate, "','", name, "',", parentid, ") RETURNING id;" ) } # This is the id that we just registered @@ -150,7 +140,7 @@ dbfile.input.insert <- function(in.path, in.prefix, siteid, startdate, enddate, inputid <- db.query( query = paste0( "SELECT id FROM inputs WHERE site_id=", siteid, - " AND format_id=", formatid + " AND format_id=", formatid, ";" ), con = con )$id @@ -251,13 +241,13 @@ dbfile.input.check <- function(siteid, startdate = NULL, enddate = NULL, mimetyp formatid <- get.id(table = "formats", colnames = c("mimetype_id", "name"), values = c(mimetypeid, formatname), con = con) if (is.null(formatid)) { - invisible(data.frame()) + return (invisible(data.frame())) } # setup parent part of query if specified - if (is.na(parentid)) { - parent <- "" - } else { + parent <- "" + + if (!is.na(parentid)) { parent <- paste0(" AND parent_id=", parentid) } @@ -459,7 +449,7 @@ dbfile.posterior.check <- function(pft, mimetype, formatname, con, hostname = PE # find appropriate pft pftid <- get.id(table = "pfts", values = "name", colnames = pft, con = con) if (is.null(pftid)) { - invisible(data.frame()) + return (invisible(data.frame())) } # find appropriate format @@ -470,7 +460,7 @@ dbfile.posterior.check <- function(pft, mimetype, formatname, con, hostname = PE formatid <- get.id(table = "formats", colnames = c("mimetype_id", "name"), values = c(mimetypeid, formatname), con = con) if (is.null(formatid)) { - invisible(data.frame()) + return (invisible(data.frame())) } # find appropriate posterior @@ -482,7 +472,7 @@ dbfile.posterior.check <- function(pft, mimetype, formatname, con, hostname = PE con = con )[["id"]] if (is.null(posteriorid)) { - invisible(data.frame()) + return (invisible(data.frame())) } invisible(dbfile.check(type = "Posterior", container.id = posteriorid, con = con, hostname = hostname)) @@ -648,12 +638,12 @@ dbfile.file <- function(type, id, con, hostname = PEcAn.remote::fqdn()) { if (nrow(files) > 1) { PEcAn.logger::logger.warn("multiple files found for", id, "returned; using the first one found") - invisible(file.path(files[1, "file_path"], files[1, "file_name"])) + return(invisible(file.path(files[1, "file_path"], files[1, "file_name"]))) } else if (nrow(files) == 1) { - invisible(file.path(files[1, "file_path"], files[1, "file_name"])) + return(invisible(file.path(files[1, "file_path"], files[1, "file_name"]))) } else { PEcAn.logger::logger.warn("no files found for ", id, "in database") - invisible(NA) + return(invisible(NA)) } } @@ -671,7 +661,8 @@ dbfile.id <- function(type, file, con, hostname = PEcAn.remote::fqdn()) { # find appropriate host hostid <- db.query(query = paste0("SELECT id FROM machines WHERE hostname='", hostname, "'"), con = con)[["id"]] if (is.null(hostid)) { - invisible(NA) + PEcAn.logger::logger.warn("hostid not found in database") + return (invisible(NA)) } # find file diff --git a/base/db/R/derive.trait.R b/base/db/R/derive.trait.R index 7abc5b9cc64..01264b25e5e 100644 --- a/base/db/R/derive.trait.R +++ b/base/db/R/derive.trait.R @@ -1,13 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -##--------------------------------------------------------------------------------------------------# ##' ##' Performs an arithmetic function, FUN, over a series of traits and returns ##' the result as a derived trait. diff --git a/base/db/R/derive.traits.R b/base/db/R/derive.traits.R index c5b8be9e7ec..cb8265149ab 100644 --- a/base/db/R/derive.traits.R +++ b/base/db/R/derive.traits.R @@ -1,13 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -##--------------------------------------------------------------------------------------------------# ##' Equivalent to derive.trait(), but operates over a series of trait datasets, ##' as opposed to individual trait rows. See \code{\link{derive.trait}}; for more information. ##' diff --git a/base/db/R/fetch.stats2se.R b/base/db/R/fetch.stats2se.R index 7425399f3c3..d0bad8ae465 100644 --- a/base/db/R/fetch.stats2se.R +++ b/base/db/R/fetch.stats2se.R @@ -1,13 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -##--------------------------------------------------------------------------------------------------# ##' Queries data from the trait database and transforms statistics to SE ##' ##' Performs query and then uses \code{transformstats} to convert miscellaneous statistical summaries diff --git a/base/db/R/get.trait.data.R b/base/db/R/get.trait.data.R index f181856639c..b307da08c0b 100644 --- a/base/db/R/get.trait.data.R +++ b/base/db/R/get.trait.data.R @@ -1,13 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -##--------------------------------------------------------------------------------------------------# ##' Get trait data from the database. 
##' ##' This will use the following items from settings: diff --git a/base/db/R/get.trait.data.pft.R b/base/db/R/get.trait.data.pft.R index 5556930d449..a2af9c5215a 100644 --- a/base/db/R/get.trait.data.pft.R +++ b/base/db/R/get.trait.data.pft.R @@ -1,13 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -##--------------------------------------------------------------------------------------------------# ##' Get trait data from the database for a single PFT ##' ##' @details `pft` should be a list containing at least `name` and `outdir`, and optionally `posteriorid` and `constants`. BEWARE: All existing files in `outir` will be deleted! diff --git a/base/db/R/pft.add.spp.R b/base/db/R/pft.add.spp.R index ce8cf1c3b10..0193aae9c5a 100644 --- a/base/db/R/pft.add.spp.R +++ b/base/db/R/pft.add.spp.R @@ -1,11 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- ## M. Dietze ##' adds a list of species to a pft based on USDA Plants acronyms ##' diff --git a/base/db/R/query.data.R b/base/db/R/query.data.R index bed236305af..73ca64a7b81 100644 --- a/base/db/R/query.data.R +++ b/base/db/R/query.data.R @@ -1,13 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -##--------------------------------------------------------------------------------------------------# ##' Function to query data from database for specific species and convert stat to SE ##' ##' @name query.data diff --git a/base/db/R/query.dplyr.R b/base/db/R/query.dplyr.R index b0501a9abcb..d1f0a1a401a 100644 --- a/base/db/R/query.dplyr.R +++ b/base/db/R/query.dplyr.R @@ -61,7 +61,7 @@ dplyr.count <- function(df) { #' @param unit string containing CF-style time unit including origin (e.g. 
"days since 2010-01-01") #' @export ncdays2date <- function(time, unit) { - date <- lubridate::parse_date_time(unit, c("ymd_hms", "ymd_h", "ymd")) + date <- lubridate::parse_date_time(unit, c("ymd_HMS", "ymd_H", "ymd")) days <- PEcAn.utils::ud_convert(time, unit, paste("days since ", date)) seconds <- PEcAn.utils::ud_convert(days, "days", "seconds") return(as.POSIXct.numeric(seconds, origin = date, tz = "UTC")) @@ -124,7 +124,7 @@ workflows <- function(bety, ensemble = FALSE) { #' @export workflow <- function(bety, workflow_id) { workflows(bety) %>% - dplyr::filter(.data$workflow_id == !!.data$workflow_id) + dplyr::filter(.data$workflow_id == !!workflow_id) } # workflow diff --git a/base/db/R/query.file.path.R b/base/db/R/query.file.path.R index 071b6af12b8..ceb79f99685 100644 --- a/base/db/R/query.file.path.R +++ b/base/db/R/query.file.path.R @@ -8,11 +8,11 @@ ##' @author Betsy Cowdery query.file.path <- function(input.id, host_name, con){ machine.host <- PEcAn.DB::default_hostname(host_name) - machine <- db.query(query = paste0("SELECT * from machines where hostname = '",machine.host,"'"), con = con) + machine <- db.query(query = paste0("SELECT * from machines where hostname = '",machine.host,"';"), con = con) dbfile <- db.query( query = paste( "SELECT file_name,file_path from dbfiles where container_id =", input.id, - " and container_type = 'Input' and machine_id =", machine$id + " and container_type = 'Input' and machine_id =", machine$id, ";" ), con = con ) diff --git a/base/db/R/query.pft.R b/base/db/R/query.pft.R index 3a4b231953f..1c45f215622 100644 --- a/base/db/R/query.pft.R +++ b/base/db/R/query.pft.R @@ -1,12 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- -#--------------------------------------------------------------------------------------------------# ##' select plant id's associated with pft ##' ##' @title Query species given pft name diff --git a/base/db/R/query.prior.R b/base/db/R/query.prior.R index 736565641f7..6c078dd8aa4 100644 --- a/base/db/R/query.prior.R +++ b/base/db/R/query.prior.R @@ -1,12 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- -#--------------------------------------------------------------------------------------------------# ##' Query Priors ##' ##' Query priors associated with a plant functional type and a set of traits. diff --git a/base/db/R/query.trait.data.R b/base/db/R/query.trait.data.R index 3162372f0f5..e55b781ab9b 100644 --- a/base/db/R/query.trait.data.R +++ b/base/db/R/query.trait.data.R @@ -1,13 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. 
-# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -##--------------------------------------------------------------------------------------------------# ##' Extract trait data from database ##' ##' Extracts data from database for a given trait and set of species, diff --git a/base/db/R/query.traits.R b/base/db/R/query.traits.R index 5b4dab14c68..0d7f54815a1 100644 --- a/base/db/R/query.traits.R +++ b/base/db/R/query.traits.R @@ -1,12 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- -#------------------------------------------------------------------------------# ##' Query available trait data associated with a given pft and a list of traits ##' ##' @name query.traits diff --git a/base/db/R/query.yields.R b/base/db/R/query.yields.R index d28c8bf10dd..dfaef5956dc 100644 --- a/base/db/R/query.yields.R +++ b/base/db/R/query.yields.R @@ -1,13 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -##--------------------------------------------------------------------------------------------------# ##' Function to query yields data from database for specific species and convert stat to SE ##' ##' @name query.yields @@ -24,13 +14,18 @@ query.yields <- function(trait = 'yield', spstr, extra.columns = '', con = NULL, ids_are_cultivars = FALSE, ...){ member_column <- if (ids_are_cultivars) {"cultivar_id"} else {"specie_id"} + + if(!is.character(extra.columns) || length(extra.columns) != 1) { + PEcAn.logger::logger.severe("`extra.columns` must be a string") + } + query <- paste("select yields.id, yields.citation_id, yields.site_id, treatments.name, yields.date, yields.time, yields.cultivar_id, yields.specie_id, yields.mean, yields.statname, yields.stat, yields.n, variables.name as vname, month(yields.date) as month,", - extra.columns, + if(extra.columns != '') { paste(extra.columns, ",", sep = "") } else {""}, "treatments.control, sites.greenhouse from yields left join treatments on (yields.treatment_id = treatments.id) @@ -38,7 +35,7 @@ query.yields <- function(trait = 'yield', spstr, extra.columns = '', con = NULL, left join variables on (yields.variable_id = variables.id) where ", member_column, " in (", spstr,");", sep = "") if(!trait == 'yield'){ - query <- gsub(");", paste(" and variables.name in ('", trait,"');", sep = ""), query) + query <- gsub(";", paste(" and 
variables.name in ('", trait,"');", sep = ""), query) } return(fetch.stats2se(connection = con, query = query)) diff --git a/base/db/R/symmetric_setdiff.R b/base/db/R/symmetric_setdiff.R index 1056ec0b5fd..9f8b2f146dd 100644 --- a/base/db/R/symmetric_setdiff.R +++ b/base/db/R/symmetric_setdiff.R @@ -1,12 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - #' Symmetric set difference of two data frames #' #' @param x,y `data.frame`s to compare diff --git a/base/db/R/take.samples.R b/base/db/R/take.samples.R index aa755a7e45e..06507a2cfba 100644 --- a/base/db/R/take.samples.R +++ b/base/db/R/take.samples.R @@ -1,13 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -##-----------------------------------------------------------------------------# ##' sample from normal distribution, given summary stats ##' ##' @name take.samples diff --git a/base/db/R/utils_db.R b/base/db/R/utils_db.R index 938b504e294..f59a9c3c2fe 100644 --- a/base/db/R/utils_db.R +++ b/base/db/R/utils_db.R @@ -1,11 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- .db.utils <- new.env() .db.utils$created <- 0 diff --git a/base/db/R/version.R b/base/db/R/version.R new file mode 100644 index 00000000000..0e58d885272 --- /dev/null +++ b/base/db/R/version.R @@ -0,0 +1,3 @@ +# Set at package install time, used by PEcAn.all::pecan_version() +# to identify development versions of packages +.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown") diff --git a/base/db/man/PEcAn.DB-package.Rd b/base/db/man/PEcAn.DB-package.Rd index 4f356fab80c..cdd80bdc1d9 100644 --- a/base/db/man/PEcAn.DB-package.Rd +++ b/base/db/man/PEcAn.DB-package.Rd @@ -23,7 +23,7 @@ Authors: \item Alexey Shiklomanov \email{alexey.shiklomanov@pnnl.gov} \item Tony Gardella \email{tonygard@bu.edu} \item Chris Black \email{chris@ckblack.org} - \item Liam Burke \email{liam.burke24@gmail.com>} + \item Liam Burke \email{liam.burke24@gmail.com} \item Ryan Kelly \email{rykelly@bu.edu} \item Dan Wang \item Carl Davidson \email{davids14@illinois.edu} diff --git a/base/db/man/assign.treatments.Rd b/base/db/man/assign.treatments.Rd index 30365dc8468..c1b6509217d 100644 --- a/base/db/man/assign.treatments.Rd +++ b/base/db/man/assign.treatments.Rd @@ -19,7 +19,7 @@ Change treatments to sequential integers Assigns all control treatments the same value, then assigns unique treatments within each site. Each site is required to have a control treatment. The algorithm (incorrectly) assumes that each site has a unique set of experimental -treatments. This assumption is required by the data in BETTdb that does not always consistently name treatments or quantity them in the managements table. Also it avoids having the need to estimate treatment by site interactions in the meta analysis model. This model uses data in the control treatment to estimate model parameters so the impact of the assumption is minimal. +treatments. This assumption is required because the data in BETYdb do not always consistently name or quantify treatments in the managements table. It also avoids the need to estimate treatment-by-site interactions in the meta-analysis model. The model uses data in the control treatment to estimate model parameters, so the impact of the assumption is minimal. } \author{ David LeBauer, Carl Davidson, Alexey Shiklomanov diff --git a/base/db/man/convert_input.Rd b/base/db/man/convert_input.Rd index 12197aecd74..466e46b73a3 100644 --- a/base/db/man/convert_input.Rd +++ b/base/db/man/convert_input.Rd @@ -16,7 +16,6 @@ convert_input( fcn, con = con, host, - browndog, write = TRUE, format.vars, overwrite = FALSE, @@ -56,8 +55,6 @@ convert_input( Currently only \code{host$name} and \code{host$Rbinary} are used by \code{convert_input}, but the whole list is passed to other functions} -\item{browndog}{List of information related to browndog conversion. 
NULL if browndog is not to be used for conversion} - \item{write}{Logical: Write new file records to the database?} \item{format.vars}{Passed on as arguments to \code{fcn}} diff --git a/base/db/tests/testthat.R b/base/db/tests/testthat.R index d2515cd48b8..03f6b840cbf 100644 --- a/base/db/tests/testthat.R +++ b/base/db/tests/testthat.R @@ -1,11 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- library(testthat) library(PEcAn.DB) diff --git a/base/db/tests/testthat/test.assign.treatments.R b/base/db/tests/testthat/test.assign.treatments.R new file mode 100644 index 00000000000..a51c40f4804 --- /dev/null +++ b/base/db/tests/testthat/test.assign.treatments.R @@ -0,0 +1,33 @@ +test_that("`assign.treatments` correctly assigns control treatment", { + data <- data.frame( + site_id = c(1, 1, 2, 2, 3, 3), + citation_id = c(101, 101, 201, 201, 301, 301), + control = c(1, 0, 0, 1, 0, 0), + trt_id = NA + ) + + updated_data <- assign.treatments(data) + expect_equal(updated_data$trt_id, c("control", NA, NA, "control", "control", "control")) +}) + +test_that("`assign.treatments` gives an error if no control treatment is set for a site", { + data <- data.frame( + site_id = c(1, 1, 2, 2, 3, 3), + citation_id = c(101, 101, 201, 201, 301, 301), + control = c(0, 0, 0, 1, 0, 0), + trt_id = c(NA, NA, NA, NA, "not_control", NA) + ) + + expect_error(assign.treatments(data), "No control treatment set") +}) + +test_that("`drop.columns` able to drop specified columns from data", { + data <- data.frame( + id = c(1, 2, 3), + name = c("a", "b", "c"), + value = c(1.2, 4.5, 6.7) + ) + + updated_data <- drop.columns(data, c("name", "not_a_column")) + expect_equal(colnames(updated_data), c("id", "value")) +}) \ No newline at end of file diff --git a/base/db/tests/testthat/test.check.lists.R b/base/db/tests/testthat/test.check.lists.R new file mode 100644 index 00000000000..e434c29a2e7 --- /dev/null +++ b/base/db/tests/testthat/test.check.lists.R @@ -0,0 +1,23 @@ +test_that("`check.lists` returns false for appropriate cases", { + x <- data.frame(id = c(1, 2, 3)) + y <- data.frame(id = c(1, 2, 3, 4)) + + # for unequal number of rows + expect_false(check.lists(x, y)) + + # for wrong filename passed + expect_false(check.lists(x, y, filename = "wrong.csv")) + + # if x and y are actually unequal + y <- data.frame(id = c(1, 2, 4)) + expect_false(check.lists(x, y, filename = "species.csv")) +}) + +test_that("`check.lists` able to correctly work for matching data frames to lists read from csv files", { + withr::with_tempfile("tf", fileext = ".csv",{ + x <- data.frame(id = c(1, 2, 3)) + y <- data.frame(id = c(1, 2, 3)) + write.csv(y, file = tf) + expect_true(check.lists(x, read.csv(tf), filename = "species.csv")) + }) +}) \ No newline at end of file diff --git a/base/db/tests/testthat/test.contents_sanity.R b/base/db/tests/testthat/test.contents_sanity.R index 6b1ad0eee85..9a11d2bb7ae 100644 --- a/base/db/tests/testthat/test.contents_sanity.R +++ b/base/db/tests/testthat/test.contents_sanity.R @@ -1,11 +1,3 @@ -#------------------------------------------------------------------------------- 
-# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- context("Basic Sanity tests for PEcAn functions that query BETYdb") test_that("append.covariates appends managements to yields",{ con <- check_db_test() diff --git a/base/db/tests/testthat/test.convert_input.R b/base/db/tests/testthat/test.convert_input.R new file mode 100644 index 00000000000..c2e7f49c1e9 --- /dev/null +++ b/base/db/tests/testthat/test.convert_input.R @@ -0,0 +1,38 @@ +test_that("`convert_input()` able to call the respective download function for a data item with the correct arguments", { + mocked_res <- mockery::mock(list(c("A", "B"))) + + mockery::stub(convert_input, 'dbfile.input.check', data.frame()) + mockery::stub(convert_input, 'db.query', data.frame(id = 1)) + mockery::stub(convert_input, 'PEcAn.remote::remote.execute.R', mocked_res) + mockery::stub(convert_input, 'purrr::map_dfr', data.frame(missing = c(FALSE), empty = c(FALSE))) + + convert_input( + input.id = NA, + outfolder = "test", + formatname = NULL, + mimetype = NULL, + site.id = 1, + start_date = "2011-01-01", + end_date = "2011-12-31", + pkg = 'PEcAn.data.atmosphere', + fcn = 'download.AmerifluxLBL', + con = NULL, + host = data.frame(name = "localhost"), + write = FALSE, + lat.in = 40, + lon.in = -88 + ) + + args <- mockery::mock_args(mocked_res) + expect_equal( + args[[1]]$script, + "PEcAn.data.atmosphere::download.AmerifluxLBL(lat.in=40, lon.in=-88, overwrite=FALSE, outfolder='test/', start_date='2011-01-01', end_date='2011-12-31')" + ) +}) + +test_that("`.get.file.deletion.commands()` able to return correct file deletion commands", { + res <- .get.file.deletion.commands(c("test")) + expect_equal(res$move.to.tmp, "dir.create(c('./tmp'), recursive=TRUE, showWarnings=FALSE); file.rename(from=c('test'), to=c('./tmp/test'))") + expect_equal(res$delete.tmp, "unlink(c('./tmp'), recursive=TRUE)") + expect_equal(res$replace.from.tmp, "file.rename(from=c('./tmp/test'), to=c('test'));unlink(c('./tmp'), recursive=TRUE)") +}) \ No newline at end of file diff --git a/base/db/tests/testthat/test.covariate.functions.R b/base/db/tests/testthat/test.covariate.functions.R new file mode 100644 index 00000000000..27fccd4eabe --- /dev/null +++ b/base/db/tests/testthat/test.covariate.functions.R @@ -0,0 +1,32 @@ +test_that("`append.covariate` able to append new column for covariates in given data based on id", { + data <- data.frame( + id = c(1, 2, 3, 4), + name = c("a", "b", "c", "d") + ) + covariates.data <- data.frame( + trait_id = c( 1, 2, 3, 4, 4), + level = c("A", "B", "C", "D", "E"), + name = c("a", "b", "c", "d", "e") + ) + updated_data <- append.covariate(data, "new_covariates_col", covariates.data) + expect_equal(updated_data$new_covariates_col, c("A", "B", "C", "D")) + expect_equal(colnames(updated_data), c("id", "new_covariates_col", "name")) +}) + +test_that("`filter_sunleaf_traits` able to filter out upper canopy leaves", { + data <- data.frame( + id = c(1, 2, 3, 4), + name = c("a", "b", "c", "d") + ) + covariates <- data.frame( + trait_id = c(1, 2, 3, 4), + name = c("leaf", "canopy_layer", "canopy_layer", "sunlight"), + level = c(1.2, 0.5, 0.7, 0.67) + ) + + updated_data <- filter_sunleaf_traits(data, 
covariates) + expect_equal(updated_data$name, c("a", "c", "d")) + + # temporary column gets removed + expect_equal(colnames(updated_data), c("id", "name")) +}) \ No newline at end of file diff --git a/base/db/tests/testthat/test.db.utils.R b/base/db/tests/testthat/test.db.utils.R index 85e4b17a661..7d8839357e2 100644 --- a/base/db/tests/testthat/test.db.utils.R +++ b/base/db/tests/testthat/test.db.utils.R @@ -1,11 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- context("Testing utility functions") test_that("get.id works on some tables, and with different inputs", { diff --git a/base/db/tests/testthat/test.dbfiles.R b/base/db/tests/testthat/test.dbfiles.R new file mode 100644 index 00000000000..4880a074cd8 --- /dev/null +++ b/base/db/tests/testthat/test.dbfiles.R @@ -0,0 +1,150 @@ +test_that("`dbfile.input.insert()` able to create correct sql queries to insert a file into dbfiles table", { + + mocked_res <- mockery::mock(data.frame(), 1, data.frame(id = 2023)) + mockery::stub(dbfile.input.insert, 'get.id', 1) + mockery::stub(dbfile.input.insert, 'db.query', mocked_res) + mockery::stub( + dbfile.input.insert, + 'dbfile.check', + data.frame(id = 101, file_name = 'test-file', file_path = 'trait.data.Rdata') + ) + + res <- dbfile.input.insert( + in.path = 'trait.data.Rdata', + in.prefix = 'test-file', + siteid = 'test-site', + startdate = '2021-01-01', + enddate = '2022-01-01', + mimetype = 'application/x-RData', + formatname = 'traits', + con = NULL + ) + + expect_equal(res$dbfile.id, 101) + expect_equal(res$input.id, 2023) + args <- mockery::mock_args(mocked_res) + + # finding appropriate input + expect_true( + grepl( + "WHERE site_id=test-site AND name= 'trait.data.Rdata' AND format_id=1;", + args[[1]]$query + ) + ) + + # parent == "" and startdate not NULL + expect_true( + grepl( + "VALUES \\(test-site, 1, '2021-01-01', '2022-01-01','trait.data.Rdata'\\)", + args[[2]]$query + ) + ) + + # startdate not NULL + expect_true( + grepl( + "WHERE site_id=test-site AND format_id=1 AND start_date='2021-01-01' AND end_date='2022-01-01'", + args[[3]]$query + ) + ) +}) + +test_that("`dbfile.input.check()` able to form the right query to check the dbfiles table to see if a file exists as an input", { + + mocked_res <- mockery::mock(NULL) + mockery::stub(dbfile.input.check, 'get.id', 1) + mockery::stub(dbfile.input.check, 'db.query', mocked_res) + + dbfile.input.check('US-Akn', '2021-01-01', '2022-01-01', 'application/x-RData', 'traits', con = NULL) + args <- mockery::mock_args(mocked_res) + expect_true( + grepl( + "WHERE site_id=US-Akn AND format_id=1", + args[[1]]$query + ) + ) +}) + +test_that("`dbfile.posterior.insert()` able to make a correct query to insert a file into dbfiles table as a posterior", { + mocked_res <- mockery::mock(NULL, NULL, data.frame(id = 10)) + mockery::stub(dbfile.posterior.insert, 'get.id', 1) + mockery::stub(dbfile.posterior.insert, 'dbfile.insert', 1010) + mockery::stub(dbfile.posterior.insert, 'db.query', mocked_res) + + dbfile.posterior.insert('trait.data.Rdata', 'test-pft', 'application/x-RData', 'traits', con = NULL) + args <- 
mockery::mock_args(mocked_res) + expect_true(grepl("INSERT INTO posteriors \\(pft_id, format_id\\) VALUES \\(1, 1\\)", args[[2]]$query)) + +}) + +test_that("`dbfile.posterior.check()` able to form the correct query to retrieve correct posterior id to run further checks", { + mocked_res <- mockery::mock(data.frame(id = 2020)) + mockery::stub(dbfile.posterior.check, 'get.id', 1) + mockery::stub(dbfile.posterior.check, 'db.query', mocked_res) + mockery::stub(dbfile.posterior.check, 'dbfile.check', data.frame(id = 1, filename = 'test_1', pathname = 'path_1')) + + dbfile.posterior.check('testpft', 'application/x-RData', 'traits', con = NULL) + + args <- mockery::mock_args(mocked_res) + expect_true( + grepl( + "SELECT id FROM posteriors WHERE pft_id=1 AND format_id=1", + args[[1]]$query + ) + ) +}) + +test_that("`dbfile.insert()` able to add correct parameter values to the insert database query and return a file id", { + mocked_res <- mockery::mock(data.frame(), data.frame(id = 2020)) + mockery::stub(dbfile.insert, 'get.id', 1) + mockery::stub(dbfile.insert, 'db.query', mocked_res) + + res <- dbfile.insert(in.path = '/test/file/path', in.prefix = 'testfile.txt', 'Input', 7, con = NULL) + args <- mockery::mock_args(mocked_res) + expect_equal(res, 2020) + expect_true(grepl("VALUES \\('Input', 7, 'testfile.txt', '/test/file/path', 1\\) RETURNING id", args[[2]]$query)) +}) + +test_that("`dbfile.check()` able to return the most recent entries from `dbfiles` table associated with a container and machine", { + mockery::stub(dbfile.check, 'get.id', 1) + mockery::stub( + dbfile.check, + 'dplyr::tbl', + data.frame( + container_type = c('Input', 'Input', 'Model'), + container_id = c(7, 7, 7), + machine_id = c(1, 1, 2), + updated_at = c(20201112, 20210101, 20210102), + id = c(2, 3, 4), + filename = c('test_1', 'test_2', 'test_3'), + pathname = c('path_1', 'path_2', 'path_3') + ) + ) + res <- dbfile.check("Input", 7, con = NULL) + + expect_equal( + res, + data.frame(container_type = 'Input', container_id = 7, machine_id = 1, updated_at = 20210101, id = 3, filename = 'test_2', pathname = 'path_2') + ) +}) + +test_that("`dbfile.file()` able to return a correctly formed file path from entries in the `dbfiles` table for a particular container and machine", { + mockery::stub(dbfile.file, 'dbfile.check', data.frame(file_path = 'test/dir/path', file_name = 'test_file')) + expect_equal(dbfile.file('Input', 7, con = NULL), file.path('test/dir/path/test_file')) +}) + +test_that("`dbfile.id()` able to construct a correct database query to get id for a dbfile given the container type and filepath", { + mocked_res <- mockery::mock(data.frame(id = 1), data.frame(container_id = 2020)) + mockery::stub(dbfile.id, 'db.query', mocked_res) + + res <- dbfile.id('Model', '/usr/local/bin/sipnet', con = NULL) + args <- mockery::mock_args(mocked_res) + + expect_equal(res, 2020) + expect_true( + grepl( + "WHERE container_type='Model' AND file_path='/usr/local/bin' AND file_name='sipnet' AND machine_id=1", + args[[2]]$query + ) + ) +}) \ No newline at end of file diff --git a/base/db/tests/testthat/test.derive.traits.R b/base/db/tests/testthat/test.derive.traits.R index ae4b4ba93c9..b389b401f31 100644 --- a/base/db/tests/testthat/test.derive.traits.R +++ b/base/db/tests/testthat/test.derive.traits.R @@ -1,19 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- -test_that("take.samples works",{ - expect_equal(take.samples(summary = data.frame(mean = 1, stat = NA)), 1) - set.seed(0) - test.sample <- take.samples(summary = data.frame(mean = 1, stat = 1), - sample.size = 2) - expect_equal(test.sample, c(2.26295428488079, 0.673766639294351)) -}) - test_that("derive.traits works",{ set.seed(0) input <- list(x = data.frame(mean = 1, stat = 1, n = 1)) diff --git a/base/db/tests/testthat/test.insert.R b/base/db/tests/testthat/test.insert.R index 95dd80a459e..168bb4f4ae6 100644 --- a/base/db/tests/testthat/test.insert.R +++ b/base/db/tests/testthat/test.insert.R @@ -47,3 +47,11 @@ test_that( } ) }) + +test_that("`match_colnames()` returns intersection of column names of a dataframe to a table", { + mockery::stub(match_colnames, 'dplyr::tbl', data.frame(id = 1, name = 'test', value = 1)) + expect_equal( + match_colnames(values = data.frame(id = 1, name = 'test'), table = 'test', con = 1), + c('id', 'name') + ) +}) \ No newline at end of file diff --git a/base/db/tests/testthat/test.met_inputs.R b/base/db/tests/testthat/test.met_inputs.R new file mode 100644 index 00000000000..49d75b7b379 --- /dev/null +++ b/base/db/tests/testthat/test.met_inputs.R @@ -0,0 +1,10 @@ +test_that("`met_inputs()` able to correctly place input parameters in the database query to retrieve available met inputs", { + mocked_res <- mockery::mock(0) + mockery::stub(met_inputs, 'db.query', mocked_res) + met_inputs(dbcon = NULL, site_id = 100, model_id = 200, hostname = "pecan") + args <- mockery::mock_args(mocked_res) + + expect_true( + grepl("inputs.site_id = \\$1.*machines.hostname = \\$2.*models.id = \\$3", args[[1]][[1]]) + ) +}) \ No newline at end of file diff --git a/base/db/tests/testthat/test.query.base.R b/base/db/tests/testthat/test.query.base.R index 43f60555bd2..301d10b7c90 100644 --- a/base/db/tests/testthat/test.query.base.R +++ b/base/db/tests/testthat/test.query.base.R @@ -1,11 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- context("test db.query") test_that("db.query can execute a trivial SQL statement and return results",{ diff --git a/base/db/tests/testthat/test.query.data.R b/base/db/tests/testthat/test.query.data.R new file mode 100644 index 00000000000..87cafba5787 --- /dev/null +++ b/base/db/tests/testthat/test.query.data.R @@ -0,0 +1,20 @@ +test_that("`query.data()` able to correctly form the query and return result in SE", { + mocked_function <- mockery::mock(data.frame(Y=rep(1,5), stat=rep(1,5), n=rep(4,5), mean = rep(3,5), statname=c('SD', 'MSE', 'LSD', 'HSD', 'MSD'))) + mockery::stub(query.data, 'db.query', mocked_function, 2) + result <- query.data(con = 1, trait = "test_trait", spstr = "test_spstr", store.unconverted = TRUE) + args <- mockery::mock_args(mocked_function) + expect_true( + grepl( + paste( + "ST_X\\(ST_CENTROID\\(sites\\.geometry\\)\\) AS lon,", + "ST_Y\\(ST_CENTROID\\(sites\\.geometry\\)\\) AS lat,.*", + "where specie_id in \\(test_spstr\\).*", + "variables.name in \\('test_trait'\\);" + ), + args[[1]]$query + ) + ) + expect_equal(result$mean_unconverted, result$mean) + expect_equal(result$stat_unconverted, result$stat) + expect_equal(result$statname, rep('SE', 5)) +}) \ No newline at end of file diff --git a/base/db/tests/testthat/test.query.dplyr.R b/base/db/tests/testthat/test.query.dplyr.R new file mode 100644 index 00000000000..cc3e6436eea --- /dev/null +++ b/base/db/tests/testthat/test.query.dplyr.R @@ -0,0 +1,154 @@ +test_that("`fancy_scientific()` converts numbers to scientific expressions with proper formatting", { + result <- fancy_scientific(1234567890) + expect_equal(result, expression("1.234568" %*% 10^+9)) + + result <- fancy_scientific(0.00000123) + expect_equal(result, expression("1.23" %*% 10^-6)) + + result <- fancy_scientific(1e-20) + expect_equal(result, expression("1" %*% 10^-20)) +}) + +test_that("`dplyr.count()` returns the correct count of rows in a dataframe", { + + df <- data.frame( + x = c(1, 2, 3, 2, 1, 3), + y = c("a", "b", "a", "b", "a", "b") + ) + result <- dplyr.count(df) + expect_equal(result, 6) + + df <- data.frame() + result <- dplyr.count(df) + expect_equal(result, 0) +}) + +test_that("`dbHostInfo()` able to return correct host information", { + mockery::stub(dbHostInfo, 'db.query', data.frame(floor = 10)) + mockery::stub( + dbHostInfo, + 'dplyr::tbl', + data.frame( + data.frame( + sync_host_id = c(10, 11), + hostname = c("test_host_1", "test_host_2"), + sync_start = c("20190201", "20190201"), + sync_end = c("20200101", "20200101"), + sync_url = c("http://test_url_1", "http://test_url_2"), + sync_contact = c("test_contact_1", "test_contact_2") + ) + ) + ) + result <- dbHostInfo(bety = 1) + expect_equal(result$hostid, 10) + expect_equal(result$hostname, "test_host_1") + expect_equal(result$start, "20190201") + expect_equal(result$end, "20200101") + expect_equal(result$sync_url, "http://test_url_1") + expect_equal(result$sync_contact, "test_contact_1") +}) + +test_that("`workflows()` able to correctly return a list of workflows", { + mockery::stub( + workflows, + 'dbHostInfo', + list( + hostid = 10, + hostname = "test_host_1", + start = 3, + end = 10, + sync_url = "http://test_url_1", + sync_contact = "test_contact_1" + 
) + ) + mockery::stub(workflows, 'dplyr::tbl', data.frame(workflow_id = c(1, 2, 3, 4, 5, 6))) + result <- workflows(bety = 1, ensemble = TRUE) + expect_equal(result, data.frame(workflow_id = c(3, 4, 5, 6))) +}) + +test_that("`workflow()` able to get workflow data by id", { + mockery::stub( + workflow, + 'workflows', + data.frame(workflow_id = c(1, 2, 3, 4, 5, 6), workflow_name = c("A", "B", "C", "D", "E", "F")) + ) + result <- workflow(bety = 1, workflow_id = 3) + expect_equal(result, data.frame(workflow_id = 3, workflow_name = "C")) +}) + +test_that("`runs()` is able to get table of runs for a corresponding workflow", { + mockery::stub( + runs, + 'workflow', + data.frame( + workflow_id = c(1, 1), + folder = c("test_folder_1", "test_folder_2") + ) + ) + mocked_res <- mockery::mock( + data.frame( + id = c(1, 2, 3, 4, 5, 6), + workflow_id = c(1, 1, 3, 4, 5, 6) + ), + data.frame( + id = c(1, 2, 3), + ensemble_id = c(1, 1, 2) + ) + ) + mockery::stub(runs, 'dplyr::tbl', mocked_res) + result <- runs(bety = 1, workflow_id = 1) + expect_equal(result$run_id, c(1, 1, 2, 2, 3, 3)) + expect_equal(result$folder, c("test_folder_1", "test_folder_2", "test_folder_1", "test_folder_2", "test_folder_1", "test_folder_2")) +}) + +test_that("`get_workflow_ids()` able to get a vector of unique workflow IDs", { + mockery::stub( + get_workflow_ids, + 'workflows', + data.frame( + workflow_id = c(1, 2, 2, 3, 4, 4), + workflow_name = c("A", "B", "C", "D", "E", "F") + ) + ) + result <- get_workflow_ids(bety = 1, query = 1, all.ids = TRUE) + expect_equal(result, c(4, 3, 2, 1)) +}) + +test_that("`get_users()` able to return only users whose ids fall within the host id range", { + mockery::stub(get_users, 'dplyr::tbl', data.frame(id = c(20200101, 20200102, 20240103))) + mockery::stub( + get_users, + 'dbHostInfo', + data.frame( + start = 20190201, + end = 20230101 + ) + ) + result <- get_users(bety = 1) + expect_equal(result, data.frame(id = c(20200101, 20200102))) +}) + +test_that("`get_run_ids()` able to get vector of run ids (in sorted order) for a given workflow ID", { + mockery::stub( + get_run_ids, + 'runs', + data.frame( + run_id = c(3, 1, 2), + folder = c("test_folder_1", "test_folder_2", "test_folder_3") + ) + ) + + result <- get_run_ids(bety = 1, workflow_id = 1) + expect_equal(result, c(1, 2, 3)) + + # if no run ids are found + mockery::stub(get_run_ids, 'runs', data.frame()) + result <- get_run_ids(bety = 1, workflow_id = 1) + expect_equal(result, c("No runs found")) +}) + +test_that("`var_names_all()` able to get vector of variable names for a particular workflow and run ID removing variables not to be shown to user", { + mockery::stub(var_names_all, 'get_var_names', c('A', 'B', 'C', 'Year','FracJulianDay')) + result <- var_names_all(bety = 1, workflow_id = 1, run_id = 1) + expect_equal(result, c('A', 'B', 'C')) +}) \ No newline at end of file diff --git a/base/db/tests/testthat/test.query.file.path.R b/base/db/tests/testthat/test.query.file.path.R new file mode 100644 index 00000000000..1da98a019e6 --- /dev/null +++ b/base/db/tests/testthat/test.query.file.path.R @@ -0,0 +1,21 @@ +test_that("`query.file.path()` able to build the right queries and return the resulting file path", { + # mock responses for subsequent calls to db.query + mocked_res <- mockery::mock(data.frame(id = '20210101'), data.frame(file_name = 'test_file', file_path = 'test_path')) + mockery::stub(query.file.path, 'db.query', mocked_res) + mockery::stub(query.file.path, 'PEcAn.remote::remote.execute.R', TRUE) + res <- query.file.path(input.id = 1, host_name = "pecan", con = 1) + args <- mockery::mock_args(mocked_res) + expect_true( + grepl( + "where hostname = 'pecan'", 
+ args[[1]]$query + ) + ) + expect_true( + grepl( + "container_id = 1.* machine_id = 20210101", + args[[2]]$query + ) + ) + expect_equal(res, 'test_path/test_file') +}) \ No newline at end of file diff --git a/base/db/tests/testthat/test.query.priors.R b/base/db/tests/testthat/test.query.priors.R new file mode 100644 index 00000000000..4f0311a9e8f --- /dev/null +++ b/base/db/tests/testthat/test.query.priors.R @@ -0,0 +1,13 @@ +test_that("`query.priors()` correctly forms the query based on the parameters passed and returns priors",{ + mocked_function <- mockery::mock(data.frame(name = c("A", "B"), value = c(0.1, 0.2))) + mockery::stub(query.priors, 'db.query', mocked_function) + priors <- query.priors("ebifarm.pavi", c("SLA"), con = 1) + expect_equal(priors, c(0.1, 0.2)) + args <- mockery::mock_args(mocked_function) + expect_true( + grepl( + "WHERE pfts.id = ebifarm.pavi AND variables.name IN .* SLA", + args[[1]]$query + ) + ) +}) \ No newline at end of file diff --git a/base/db/tests/testthat/test.query.site.R b/base/db/tests/testthat/test.query.site.R new file mode 100644 index 00000000000..ec61c210581 --- /dev/null +++ b/base/db/tests/testthat/test.query.site.R @@ -0,0 +1,14 @@ +test_that("`query.site()` correctly forms the query and returns the site", { + mock_site_data <- data.frame(id = c(1), lon = c(1), lat = c(1)) + mocked_function <- mockery::mock(mock_site_data) + mockery::stub(query.site, 'db.query', mocked_function) + site <- query.site(1, con = 1) + expect_equal(site, mock_site_data) + args <- mockery::mock_args(mocked_function) + expect_true( + grepl( + "WHERE id = 1", + args[[1]]$query + ) + ) +}) \ No newline at end of file diff --git a/base/db/tests/testthat/test.query.yields.R b/base/db/tests/testthat/test.query.yields.R new file mode 100644 index 00000000000..c8a7905d51f --- /dev/null +++ b/base/db/tests/testthat/test.query.yields.R @@ -0,0 +1,43 @@ +test_that("`query.yields()` able to form the query correctly for trait set to 'yield' and with no extra columns", { + mocked_function <- mockery::mock(data.frame(Y=rep(1,5), stat=rep(1,5), n=rep(4,5), mean = rep(3,5), statname=c('SD', 'MSE', 'LSD', 'HSD', 'MSD'))) + mockery::stub(query.yields, 'db.query', mocked_function, 2) + result <- query.yields(spstr = "test_spstr", con = 1) + + args <- mockery::mock_args(mocked_function) + expect_true( + grepl( + paste0( + "month\\(yields.date\\) as month,treatments.control.*", + "where specie_id in \\(test_spstr\\);" + ), + args[[1]]$query + ) + ) +}) + +test_that("`query.yields()` throws an error if extra columns is not a string", { + expect_error( + query.yields(spstr = "test_spstr", con = 1, extra.columns = 1), + "`extra.columns` must be a string" + ) + expect_error( + query.yields(spstr = "test_spstr", con = 1, extra.columns = c("a","b")), + "`extra.columns` must be a string" + ) +}) + +test_that("`query.yields()` able to form the query correctly for trait not equal to 'yield' and with extra columns",{ + mocked_function <- mockery::mock(data.frame(Y=rep(1,5), stat=rep(1,5), n=rep(4,5), mean = rep(3,5), statname=c('SD', 'MSE', 'LSD', 'HSD', 'MSD'))) + mockery::stub(query.yields, 'db.query', mocked_function, 2) + result <- query.yields(trait = 'test_trait', spstr = "test_spstr", extra.columns = 'test_col', con = 1) + args <- mockery::mock_args(mocked_function) + expect_true( + grepl( + paste0( + "month\\(yields.date\\) as month,test_col,treatments.control.*", + "where specie_id in \\(test_spstr\\) and variables.name in \\('test_trait'\\)" + ), + args[[1]]$query + ) + ) +}) \ No 
newline at end of file diff --git a/base/db/tests/testthat/test.stamp.R b/base/db/tests/testthat/test.stamp.R new file mode 100644 index 00000000000..c566e073ee4 --- /dev/null +++ b/base/db/tests/testthat/test.stamp.R @@ -0,0 +1,15 @@ +test_that("`stamp_started()` able to correctly update the query for run_id passed", { + mock_function <- mockery::mock() + mockery::stub(stamp_started, 'PEcAn.DB::db.query', mock_function) + stamp_started(1, 1) + args <- mockery::mock_args(mock_function) + expect_true(grepl("started_at .* WHERE id = 1", args[[1]]$query)) +}) + +test_that("`stamp_finished()` able to correctly update the query for run_id passed", { + mock_function <- mockery::mock() + mockery::stub(stamp_finished, 'PEcAn.DB::db.query', mock_function) + stamp_finished(1, 1) + args <- mockery::mock_args(mock_function) + expect_true(grepl("finished_at .* WHERE id = 1", args[[1]]$query)) +}) \ No newline at end of file diff --git a/base/db/tests/testthat/test.take.samples.R b/base/db/tests/testthat/test.take.samples.R new file mode 100644 index 00000000000..dee2e479b44 --- /dev/null +++ b/base/db/tests/testthat/test.take.samples.R @@ -0,0 +1,15 @@ +test_that("`take.samples` returns mean when stat is NA", { + summary = list(mean = 10, stat = NA) + expect_equal(take.samples(summary = summary), summary$mean) +}) + +test_that("`take.samples` returns a vector of length sample.size for given summary stats", { + summary = list(mean = 10, stat = 10) + sample.size = 10 + expect_equal(length(take.samples(summary = summary, sample.size = sample.size)), sample.size) + + # Testing for exact return values for a simple example + test.sample <- take.samples(summary = data.frame(mean = 1, stat = 1), + sample.size = 2) + expect_equal(test.sample, c(2.26295428488079, 0.673766639294351)) +}) \ No newline at end of file diff --git a/base/db/tests/testthat/test.utils_db.R b/base/db/tests/testthat/test.utils_db.R new file mode 100644 index 00000000000..b859baea575 --- /dev/null +++ b/base/db/tests/testthat/test.utils_db.R @@ -0,0 +1,26 @@ +# test_that("`db.print.connections()` able to log out details about connections", { +# PEcAn.logger::logger.setUseConsole(TRUE, FALSE) +# on.exit(PEcAn.logger::logger.setUseConsole(TRUE, TRUE), add = TRUE) +# expect_output( +# db.print.connections(), +# paste0( +# ".* Created 0 connections and executed 0 queries .* ", +# "Created 0 connections and executed 0 queries.*", +# "No open database connections." 
+# ) +# ) +# }) + +test_that("`db.showQueries()` and `db.getShowQueries()` able to set and get the value of the .db.utils$showquery variable respectively", { + showquery_old <- db.getShowQueries() + on.exit(db.showQueries(showquery_old)) + db.showQueries(TRUE) + expect_equal(db.getShowQueries(), TRUE) +}) + +test_that("`default_hostname()` fixes hostname if the host is localhost", { + expect_equal(default_hostname("localhost"), PEcAn.remote::fqdn()) + + # if not localhost + expect_equal(default_hostname("pecan"), "pecan") +}) \ No newline at end of file diff --git a/base/logger/DESCRIPTION b/base/logger/DESCRIPTION index f5f2ab67b30..50bb54d1eee 100644 --- a/base/logger/DESCRIPTION +++ b/base/logger/DESCRIPTION @@ -1,7 +1,6 @@ Package: PEcAn.logger Title: Logger Functions for 'PEcAn' -Version: 1.8.1.9000 -Date: 2021-07-27 +Version: 1.8.2.9000 Authors@R: c(person("Rob", "Kooper", role = c("aut", "cre"), email = "kooper@illinois.edu"), person("Alexey", "Shiklomanov", role = c("aut"), @@ -25,8 +24,10 @@ URL: https://pecanproject.github.io/ Imports: utils, stringi -Suggests: testthat +Suggests: + testthat, + withr License: BSD_3_clause + file LICENSE Encoding: UTF-8 -RoxygenNote: 7.2.3 +RoxygenNote: 7.3.2 Roxygen: list(markdown = TRUE) diff --git a/base/logger/NEWS.md b/base/logger/NEWS.md index e41342ddd9f..7297277f276 100644 --- a/base/logger/NEWS.md +++ b/base/logger/NEWS.md @@ -1,4 +1,8 @@ -# PEcAn.logger (development version) +# PEcAn.logger 1.8.2.9000 + + + +# PEcAn.logger 1.8.2 - `PEcAn.logger::setLevel()` now invisibly returns the previously set logger level diff --git a/base/logger/R/logger.R b/base/logger/R/logger.R index 0cd4ed12dc7..ee0fc9c8e62 100644 --- a/base/logger/R/logger.R +++ b/base/logger/R/logger.R @@ -1,12 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - .utils.logger <- new.env() .utils.logger$filename <- NA .utils.logger$console <- TRUE diff --git a/base/logger/R/version.R b/base/logger/R/version.R new file mode 100644 index 00000000000..0e58d885272 --- /dev/null +++ b/base/logger/R/version.R @@ -0,0 +1,3 @@ +# Set at package install time, used by pecan.all::pecan_version() +# to identify development versions of packages +.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown") diff --git a/base/logger/tests/testthat/test.logger.R b/base/logger/tests/testthat/test.logger.R index e89298525f0..bc6f354dda9 100644 --- a/base/logger/tests/testthat/test.logger.R +++ b/base/logger/tests/testthat/test.logger.R @@ -1,18 +1,46 @@ -## ------------------------------------------------------------------------------- -## Copyright (c) 2012 University of Illinois, NCSA. -## All rights reserved. 
This program and the accompanying materials -## are made available under the terms of the -## University of Illinois/NCSA Open Source License -## which accompanies this distribution, and is available at -## http://opensource.ncsa.illinois.edu/license.html -##------------------------------------------------------------------------------- context("Testing Logger Settings") + +test_that("`logger.getLevelNumber` returns correct level number",{ + expect_equal(logger.getLevelNumber("all"), 0) + expect_equal(logger.getLevelNumber("debug"), 10) + expect_equal(logger.getLevelNumber("info"), 20) + expect_equal(logger.getLevelNumber("warn"), 30) + expect_equal(logger.getLevelNumber("error"), 40) + expect_equal(logger.getLevelNumber("severe"), 40) + expect_equal(logger.getLevelNumber("off"), 60) + + old_settings <- logger.setLevel("ERROR") + on.exit(logger.setLevel(old_settings), add = TRUE) + expect_equal(logger.getLevelNumber("INVALID"), 20) +}) + +test_that("`setWidth` works for different specified number of chars per line",{ + logger.setUseConsole(TRUE, FALSE) + on.exit(logger.setUseConsole(TRUE, TRUE), add = TRUE) + + expect_output(logger.info("A long error message that helps us understand what the error in the function is"), + "INFO \\[.*\\] : \\n A long error message that helps us understand what the error in the \\n function is ") + + old_width <- .utils.logger$width + logger.setWidth(10) + on.exit(logger.setWidth(old_width), add = TRUE) + expect_output(logger.info("A long error message that helps us understand what the error in the function is"), + "INFO \\[.*\\] : \\n A long \\n error \\n message \\n that \\n helps \\n us \\n understand \\n what \\n the \\n error \\n in the \\n function \\n is ") + + logger.setWidth(30) + expect_output(logger.info("A long error message that helps us understand what the error in the function is"), + "INFO \\[.*\\] : \\n A long error message that \\n helps us understand what \\n the error in the function \\n is ") + +}) + test_that("logger prints right messages, responds correctly to logger.setLevel",{ logger.setUseConsole(TRUE, FALSE) + on.exit(logger.setUseConsole(TRUE, TRUE), add = TRUE) - logger.setLevel("ALL") + old_settings <- logger.setLevel("ALL") + on.exit(logger.setLevel(old_settings), add = TRUE) expect_equal(logger.getLevel(), "ALL") expect_output(logger.debug("message"), "DEBUG \\[.*\\] : message") expect_output(logger.info("message"), "INFO \\[.*\\] : message") @@ -68,11 +96,26 @@ test_that("logger prints right messages, responds correctly to logger.setLevel", expect_silent(logger.info("message")) expect_silent(logger.warn("message")) expect_silent(logger.error("message")) + + logger.setQuitOnSevere(FALSE) + on.exit(logger.setQuitOnSevere(TRUE), add = TRUE) + expect_error(logger.severe("message"), "message") }) test_that("logger message labels match enclosing function", { logger.setUseConsole(console = TRUE, stderr = FALSE) + on.exit(logger.setUseConsole(console = TRUE, stderr = TRUE), add = TRUE) expect_output(identity(logger.info("message")), "[identity] : message", fixed = TRUE) expect_output(identity(PEcAn.logger::logger.info("message")), "[identity] : message", fixed = TRUE) }) + +test_that("`logger.message` able to redirect logging information to file set by `logger.setOutputFile`", { + on.exit(logger.setOutputFile(NA), add = TRUE) + f <- withr::with_tempfile("tf", { + logger.setOutputFile(tf) + logger.message("WARN", "message") + readLines(tf) + }) + expect_true(grepl(".*WARN \\[.*\\] : message", f)) +}) \ No newline at end of file diff 
--git a/base/logger/tests/testthat/test.logifnot.R b/base/logger/tests/testthat/test.logifnot.R new file mode 100644 index 00000000000..c8daa1a3de0 --- /dev/null +++ b/base/logger/tests/testthat/test.logifnot.R @@ -0,0 +1,75 @@ +test_that("`is_definitely_true` handles invalid conditionals passed",{ + expect_equal(is_definitely_true(NULL), FALSE) + expect_equal(is_definitely_true(""), FALSE) + expect_equal(is_definitely_true("pecan"), FALSE) +}) + +test_that("`is_definitely_true` handles single conditional statement correctly",{ + test_list <- list(1:10) + expect_equal(is_definitely_true("pecan"=="carya"), FALSE) + expect_equal(is_definitely_true(is.list(test_list)), TRUE) + expect_equal(is_definitely_true("pecan"=="pecan" && "pecan"!="bety" && is.list(test_list)), TRUE) + expect_equal(is_definitely_true("pecan"=="pecan" || ("pecan"=="bety" && is.list(test_list))), TRUE) +}) + +test_that("`check_conditions` handles multiple conditional statements correctly",{ + test_list <- list(1:10) + expect_equal(check_conditions(FALSE && TRUE, "pecan"=="pecan"), FALSE) + expect_equal(check_conditions("pecan"=="pecan", TRUE || FALSE && is.list(test_list)), TRUE) +}) + + +test_that( "logifnot prints right message based on the conditions passed, responds correctly to logger.setLevel",{ + logger.setUseConsole(TRUE, FALSE) + on.exit(logger.setUseConsole(TRUE, TRUE), add = TRUE) + + old_settings <- logger.setLevel("ALL") + on.exit(logger.setLevel(old_settings), add = TRUE) + + expect_output(debugifnot("message", FALSE), "DEBUG \\[.*\\] : message") + expect_output(infoifnot("message", FALSE), "INFO \\[.*\\] : message") + expect_output(warnifnot("message", FALSE), "WARN \\[.*\\] : message") + expect_output(errorifnot("message", FALSE), "ERROR \\[.*\\] : message") + expect_silent(debugifnot("message", TRUE)) + expect_silent(infoifnot("message", TRUE)) + expect_silent(warnifnot("message", TRUE)) + expect_silent(errorifnot("message", TRUE)) + + logger.setLevel("DEBUG") + expect_output(debugifnot("message", FALSE), "DEBUG \\[.*\\] : message") + expect_output(infoifnot("message", FALSE), "INFO \\[.*\\] : message") + expect_output(warnifnot("message", FALSE), "WARN \\[.*\\] : message") + expect_output(errorifnot("message", FALSE), "ERROR \\[.*\\] : message") + + logger.setLevel("INFO") + expect_silent(debugifnot("message", FALSE)) + expect_output(infoifnot("message", FALSE), "INFO \\[.*\\] : message") + expect_output(warnifnot("message", FALSE), "WARN \\[.*\\] : message") + expect_output(errorifnot("message", FALSE), "ERROR \\[.*\\] : message") + + logger.setLevel("WARN") + expect_silent(debugifnot("message", FALSE)) + expect_silent(infoifnot("message", FALSE)) + expect_output(warnifnot("message", FALSE), "WARN \\[.*\\] : message") + expect_output(errorifnot("message", FALSE), "ERROR \\[.*\\] : message") + + logger.setLevel("ERROR") + expect_silent(debugifnot("message", FALSE)) + expect_silent(infoifnot("message", FALSE)) + expect_silent(warnifnot("message", FALSE)) + expect_output(errorifnot("message", FALSE), "ERROR \\[.*\\] : message") + + logger.setLevel("OFF") + expect_silent(debugifnot("message", FALSE)) + expect_silent(infoifnot("message", FALSE)) + expect_silent(warnifnot("message", FALSE)) + expect_silent(errorifnot("message", FALSE)) + + logger.setQuitOnSevere(FALSE) + on.exit(logger.setQuitOnSevere(TRUE), add = TRUE) + expect_error(severeifnot("message", FALSE), "message") +}) + + + + diff --git a/base/logger/tests/testthat/test.print2string.R b/base/logger/tests/testthat/test.print2string.R new file mode 
100644 index 00000000000..ec1aceac743 --- /dev/null +++ b/base/logger/tests/testthat/test.print2string.R @@ -0,0 +1,34 @@ +test_that("print2string returns correct output for a single value", { + output <- print2string(10) + expect_equal(output, "[1] 10") +}) + +test_that("print2string returns correct output for multiple values", { + output <- print2string(1:5) + expected_output <- "[1] 1 2 3 4 5" + expect_equal(output, expected_output) +}) + +test_that("print2string correctly handles additional arguments", { + output <- print2string(letters[1:3], n = Inf, na.print = "") + expected_output <- "[1] \"a\" \"b\" \"c\"" + expect_equal(output, expected_output) +}) + +test_that("print2string works for empty input value", { + output <- print2string(NULL) + expect_equal(output, "NULL") +}) + +test_that("print2string returns correct output for dataframes as input", { + df <- data.frame(test = c("download", "process", "plot"), status = c(TRUE, TRUE, FALSE)) + output <- print2string(df) + expected_output <- " test status\n1 download TRUE\n2 process TRUE\n3 plot FALSE" + expect_equal(output, expected_output) +}) + +test_that("print2string returns correct output for matrices as inputs", { + output <- print2string(mat <- matrix(1:6, nrow = 2, ncol = 3, byrow = TRUE)) + expected_output <- " [,1] [,2] [,3]\n[1,] 1 2 3\n[2,] 4 5 6" + expect_equal(output, expected_output) +}) diff --git a/base/qaqc/DESCRIPTION b/base/qaqc/DESCRIPTION index bd0a59fc1ad..041f957d25b 100644 --- a/base/qaqc/DESCRIPTION +++ b/base/qaqc/DESCRIPTION @@ -1,8 +1,7 @@ Package: PEcAn.qaqc Type: Package Title: QAQC -Version: 1.7.2 -Date: 2021-10-04 +Version: 1.7.3.9000 Authors@R: c(person("David", "LeBauer", role = c("aut", "cre"), email = "dlebauer@email.arizona.edu"), person("Tess", "McCabe", role = c("aut"), @@ -20,30 +19,22 @@ Imports: rlang, stats Suggests: - knitr, + knitr (>= 1.42), + mockery, mvbutils, PEcAn.BIOCRO, PEcAn.ED2, PEcAn.SIPNET, PEcAn.utils, - rmarkdown, + rmarkdown (>= 2.19), testthat (>= 3.0.4), - vdiffr (>= 1.0.2) -X-Comment-Remotes: - Installing vdiffr from GitHub because as of 2021-09-23, this is the - easiest way to get version >= 1.0.2 onto Docker images that use older - Rstudio Package Manager snapshots. - Ditto for testthat, because we need >= 3.0.4 for vdiffr compatibility. - When building on a system that finds these versions on CRAN, - OK to remove these Remotes lines and this comment. -Remotes: - github::r-lib/testthat@v3.1.6, - github::r-lib/vdiffr@v1.0.4 + vdiffr (>= 1.0.2), + withr License: BSD_3_clause + file LICENSE Copyright: Authors LazyLoad: yes LazyData: FALSE Encoding: UTF-8 -VignetteBuilder: knitr +VignetteBuilder: knitr, rmarkdown Config/testthat/edition: 3 -RoxygenNote: 7.2.3 +RoxygenNote: 7.3.2 diff --git a/base/qaqc/LICENSE b/base/qaqc/LICENSE index 9e38c2dc685..09ef35a60b4 100644 --- a/base/qaqc/LICENSE +++ b/base/qaqc/LICENSE @@ -1,29 +1,3 @@ -University of Illinois/NCSA Open Source License - -Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. 
- -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal with the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -- Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. -- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. -- Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - +YEAR: 2024 +COPYRIGHT HOLDER: PEcAn Project +ORGANIZATION: PEcAn Project, authors affiliations diff --git a/base/qaqc/R/find_formats_without_inputs.R b/base/qaqc/R/find_formats_without_inputs.R index b6df894b0ac..1107213c926 100644 --- a/base/qaqc/R/find_formats_without_inputs.R +++ b/base/qaqc/R/find_formats_without_inputs.R @@ -5,13 +5,13 @@ ##' @param con database connection object ##' @param user_id_code Optional parameter to search by user_id ##' @param created_after Optional parameter to search by creation date. Date must be in form 'YYYY-MM-DD'. -##' @param created_before Optional parameter to search by creation date. Can be used in conjunciton with created_after to specify a spesific window. Date must be in form 'YYYY-MM-DD'. -##' @param updated_after Optional parameter to search all entried updated after a certain date. Date must be in form 'YYYY-MM-DD'. -##' @param updated_before Optional parameter to search all entried updated before a certain date. Date must be in form 'YYYY-MM-DD'. +##' @param created_before Optional parameter to search by creation date. Can be used in conjunction with created_after to specify a specific window. Date must be in form 'YYYY-MM-DD'. +##' @param updated_after Optional parameter to search all entries updated after a certain date. Date must be in form 'YYYY-MM-DD'. +##' @param updated_before Optional parameter to search all entries updated before a certain date. Date must be in form 'YYYY-MM-DD'. ##' @param con connection the the bety database ##' ##' -##' @description This is a fucntion that returns a dataframe with all of the format entries that have no assosiated input records. +##' @description This is a function that returns a dataframe with all of the format entries that have no associated input records. 
##' ##' For more information on how to use this function see the "Pre-release-database-cleanup" script in the 'vignettes' folder ##' or look at the README diff --git a/base/qaqc/R/find_inputs_without_formats.R b/base/qaqc/R/find_inputs_without_formats.R index ba5ff91a2ba..0c21fd71b2d 100644 --- a/base/qaqc/R/find_inputs_without_formats.R +++ b/base/qaqc/R/find_inputs_without_formats.R @@ -7,7 +7,7 @@ ##' @param con connection the the bety database ##' ##' -##' @description This is a function that returns a dataframe with all of the input entries that have no assosiated format records. +##' @description This is a function that returns a dataframe with all of the input entries that have no associated format records. ##' This is very rare in the database. ##' ##' For more information on how to use this function see the "Pre-release-database-cleanup" script in the 'vignettes' folder diff --git a/base/qaqc/R/get_table_column_names.R b/base/qaqc/R/get_table_column_names.R index 8a5a8bfe5ac..9c90bc81852 100644 --- a/base/qaqc/R/get_table_column_names.R +++ b/base/qaqc/R/get_table_column_names.R @@ -1,38 +1,35 @@ ##' get_table_column_names ##' @author Tempest McCabe -##' -##' @param table a table that is output from one of the find_* functions, -##' or a data.frame containing the output from multiple find_* functions. Could also be a vector of table names. -##' @param con a connection to the bety database. -##' -##' -##' @description This function will return a vector of the column names for a given table(s) in the bety database. -##' Useful for choseing which columns to include in the written-out table. +##' +##' @param table a table that is output from one of the find_* functions, +##' or a data.frame containing the output from multiple find_* functions. Could also be a vector of table names. +##' @param con a connection to the bety database. +##' +##' +##' @description This function will return a vector of the column names for a given table(s) in the bety database. +##' Useful for choosing which columns to include in the written-out table. 
##' ##' For more information on how to use this function see the "Pre-release-database-cleanup" script in the 'vignettes' folder ##' or look at the README ##' @export -get_table_column_names<-function(table, con){ - - if(is.data.frame(table)){ - if("table_name" %in% names(table)){ - table_factor<-as.factor(table$table_name) - table_name<-levels(table_factor) - }else{ +get_table_column_names <- function(table, con) { + if (is.data.frame(table)) { + if ("table_name" %in% names(table)) { + table_factor <- as.factor(table$table_name) + table_name <- levels(table_factor) + } else { PEcAn.logger::logger.severe("Table needs either a 'table_names' column or be a character vector of table names") } - - }else if(is.vector(table)){ - table_name<-table - }else{ + } else if (is.vector(table)) { + table_name <- table + } else { PEcAn.logger::logger.severe("table must either be a dataframe or a vector") } - column_names<-list() - for(i in seq_along(table_name)){ - query <- PEcAn.DB::db.query(paste("SELECT * from", table_name, "LIMIT 1"), con=con) - column_names[[i]]<-colnames(query) - names(column_names)<-table_name - + column_names <- list() + for (i in seq_along(table_name)) { + query <- PEcAn.DB::db.query(paste("SELECT * from", table_name, "LIMIT 1"), con = con) + column_names[[i]] <- colnames(query) + names(column_names) <- table_name } return(column_names) } diff --git a/base/qaqc/R/taylor.plot.R b/base/qaqc/R/taylor.plot.R index 00986770518..ae00f76030e 100644 --- a/base/qaqc/R/taylor.plot.R +++ b/base/qaqc/R/taylor.plot.R @@ -1,12 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - ##' Plot taylor diagram for benchmark sites ##' ##' @param dataset data to plot @@ -20,7 +11,7 @@ new.taylor <- function(dataset, runid, siteid) { mod <- dataset[sitemask, paste0("model", run)] R <- stats::cor(obs, mod, use = "pairwise") sd.f <- stats::sd(mod) - lab <- paste(paste0("model", run), paste0("site", si)) + lab <- paste(paste0("model", run), paste0("site", si)) if (run == runid[1] && si == siteid[1]) { plotrix::taylor.diagram(obs, mod, pos.cor = FALSE) } else { diff --git a/base/qaqc/R/version.R b/base/qaqc/R/version.R new file mode 100644 index 00000000000..0e58d885272 --- /dev/null +++ b/base/qaqc/R/version.R @@ -0,0 +1,3 @@ +# Set at package install time, used by pecan.all::pecan_version() +# to identify development versions of packages +.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown") diff --git a/base/qaqc/R/write_out_table.R b/base/qaqc/R/write_out_table.R index 75ec8879ff7..dd9de9e5cee 100644 --- a/base/qaqc/R/write_out_table.R +++ b/base/qaqc/R/write_out_table.R @@ -1,30 +1,29 @@ ##' write_out_table ##' @author Tempest McCabe -##' -##' @param table a table that is output from one of the find_* fucntions +##' +##' @param table a table that is output from one of the find_* functions ##' @param table_name name of table ##' @param outdir path to folder into which the editable table will be written -##' @param relevant_table_columns a list of all columns to keep. ID and table name will be automatically included. 
-##' -##' -##' @description This is a fucntion that returns a dataframe with all of the format entries that have no assosiated input records. +##' @param relevant_table_columns a list of all columns to keep. ID and table name will be automatically included. +##' +##' +##' @description This is a function that returns a dataframe with all of the format entries that have no associated input records. ##' ##' For more information on how to use this function see the "Pre-release-database-cleanup" script in the 'vignettes' folder ##' or look at the README ##' @export -write_out_table<-function(table,table_name,outdir, relevant_table_columns){ - - if(!"id" %in% relevant_table_columns){ - relevant_table_columns<-c(relevant_table_columns, "id") +write_out_table <- function(table, table_name, outdir, relevant_table_columns) { + if (!"id" %in% relevant_table_columns) { + relevant_table_columns <- c(relevant_table_columns, "id") } - if(!"table_name" %in% relevant_table_columns){ - relevant_table_columns<-c(relevant_table_columns, "id", "table_name") + if (!"table_name" %in% relevant_table_columns) { + relevant_table_columns <- c(relevant_table_columns, "table_name") } - if(!any(c("id", "table_name") %in% names(table))){ + if (!any(c("id", "table_name") %in% names(table))) { PEcAn.logger::logger.severe("table provided doesn't have a table_name or id column or both. ") } - - - table<-table[ , (relevant_table_columns)] - utils::write.table(table, file=paste(outdir,"/query_of_",table_name ,sep=""),row.names = FALSE,sep="|") -} \ No newline at end of file + + + table <- table[, (relevant_table_columns)] + utils::write.table(table, file = paste(outdir, "/query_of_", table_name, sep = ""), row.names = FALSE, sep = "|") +} diff --git a/base/qaqc/inst/extdata/extdata.R b/base/qaqc/inst/extdata/extdata.R index 8d1ca510942..a4d63684019 100644 --- a/base/qaqc/inst/extdata/extdata.R +++ b/base/qaqc/inst/extdata/extdata.R @@ -1,13 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -#--------------------------------------------------------------------------------------------------# ##' ##' generate benchmarking inputs table ##' @title Generate benchmarking inputs diff --git a/base/qaqc/man/find_formats_without_inputs.Rd b/base/qaqc/man/find_formats_without_inputs.Rd index c20d769cd1c..7cc57d31d6a 100644 --- a/base/qaqc/man/find_formats_without_inputs.Rd +++ b/base/qaqc/man/find_formats_without_inputs.Rd @@ -20,14 +20,14 @@ find_formats_without_inputs( \item{created_after}{Optional parameter to search by creation date. Date must be in form 'YYYY-MM-DD'.} -\item{updated_after}{Optional parameter to search all entried updated after a certain date. Date must be in form 'YYYY-MM-DD'.} +\item{updated_after}{Optional parameter to search all entries updated after a certain date. Date must be in form 'YYYY-MM-DD'.} -\item{created_before}{Optional parameter to search by creation date. Can be used in conjunciton with created_after to specify a spesific window. Date must be in form 'YYYY-MM-DD'.} +\item{created_before}{Optional parameter to search by creation date. 
Can be used in conjunction with created_after to specify a specific window. Date must be in form 'YYYY-MM-DD'.} -\item{updated_before}{Optional parameter to search all entried updated before a certain date. Date must be in form 'YYYY-MM-DD'.} +\item{updated_before}{Optional parameter to search all entries updated before a certain date. Date must be in form 'YYYY-MM-DD'.} } \description{ -This is a fucntion that returns a dataframe with all of the format entries that have no assosiated input records. +This is a function that returns a dataframe with all of the format entries that have no associated input records. For more information on how to use this function see the "Pre-release-database-cleanup" script in the 'vignettes' folder or look at the README diff --git a/base/qaqc/man/find_inputs_without_formats.Rd b/base/qaqc/man/find_inputs_without_formats.Rd index 1ceb2cc315d..ae19ac4a686 100644 --- a/base/qaqc/man/find_inputs_without_formats.Rd +++ b/base/qaqc/man/find_inputs_without_formats.Rd @@ -23,7 +23,7 @@ find_inputs_without_formats( \item{updated_before, updated_after}{Optional parameter to search all entried updated after a certain date. Date must be in form 'YYYY-MM-DD'} } \description{ -This is a function that returns a dataframe with all of the input entries that have no assosiated format records. +This is a function that returns a dataframe with all of the input entries that have no associated format records. This is very rare in the database. For more information on how to use this function see the "Pre-release-database-cleanup" script in the 'vignettes' folder diff --git a/base/qaqc/man/get_table_column_names.Rd b/base/qaqc/man/get_table_column_names.Rd index 44f31c778a5..3757ec959fd 100644 --- a/base/qaqc/man/get_table_column_names.Rd +++ b/base/qaqc/man/get_table_column_names.Rd @@ -7,14 +7,14 @@ get_table_column_names(table, con) } \arguments{ -\item{table}{a table that is output from one of the find_* functions, +\item{table}{a table that is output from one of the find_* functions, or a data.frame containing the output from multiple find_* functions. Could also be a vector of table names.} \item{con}{a connection to the bety database.} } \description{ -This function will return a vector of the column names for a given table(s) in the bety database. -Useful for choseing which columns to include in the written-out table. +This function will return a vector of the column names for a given table(s) in the bety database. +Useful for choosing which columns to include in the written-out table. For more information on how to use this function see the "Pre-release-database-cleanup" script in the 'vignettes' folder or look at the README diff --git a/base/qaqc/man/write_out_table.Rd b/base/qaqc/man/write_out_table.Rd index 2eaf9dc57d5..2140fae26eb 100644 --- a/base/qaqc/man/write_out_table.Rd +++ b/base/qaqc/man/write_out_table.Rd @@ -7,7 +7,7 @@ write_out_table(table, table_name, outdir, relevant_table_columns) } \arguments{ -\item{table}{a table that is output from one of the find_* fucntions} +\item{table}{a table that is output from one of the find_* functions} \item{table_name}{name of table} @@ -16,7 +16,7 @@ write_out_table(table, table_name, outdir, relevant_table_columns) \item{relevant_table_columns}{a list of all columns to keep. ID and table name will be automatically included.} } \description{ -This is a fucntion that returns a dataframe with all of the format entries that have no assosiated input records. 
+This is a function that returns a dataframe with all of the format entries that have no associated input records. For more information on how to use this function see the "Pre-release-database-cleanup" script in the 'vignettes' folder or look at the README diff --git a/base/qaqc/tests/Rcheck_reference.log b/base/qaqc/tests/Rcheck_reference.log index dcbf435c84f..fa634d61096 100644 --- a/base/qaqc/tests/Rcheck_reference.log +++ b/base/qaqc/tests/Rcheck_reference.log @@ -21,9 +21,6 @@ * checking package directory ... OK * checking DESCRIPTION meta-information ... NOTE Malformed Description field: should contain one or more complete sentences. -Authors@R field gives no person with name and roles. -Authors@R field gives no person with maintainer role, valid email -address and non-empty name. * checking top-level files ... NOTE Non-standard file/directory found at top level: ‘README.Rmd’ diff --git a/base/qaqc/tests/testthat.R b/base/qaqc/tests/testthat.R index f046421f15b..bf5de661ff5 100644 --- a/base/qaqc/tests/testthat.R +++ b/base/qaqc/tests/testthat.R @@ -1,11 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- library(testthat) library(PEcAn.qaqc) diff --git a/base/qaqc/tests/testthat/test-taylorplot.R b/base/qaqc/tests/testthat/test-taylorplot.R index 012a380bb3e..7acc7853bcc 100644 --- a/base/qaqc/tests/testthat/test-taylorplot.R +++ b/base/qaqc/tests/testthat/test-taylorplot.R @@ -1,12 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - test_that("taylor diagram", { set.seed(1) diff --git a/base/qaqc/tests/testthat/test.cull_database_entries.R b/base/qaqc/tests/testthat/test.cull_database_entries.R new file mode 100644 index 00000000000..0b81d294fa6 --- /dev/null +++ b/base/qaqc/tests/testthat/test.cull_database_entries.R @@ -0,0 +1,25 @@ +test_that("`cull_database_entries()` gives errors for faulty inputs",{ + expect_error( + cull_database_entries(outdir = 'test'), + "If a table object hasn't been provided, a file_name must be set." + ) + expect_error( + cull_database_entries(table = 'test_table', file_name = 'test_file', outdir = 'test'), + "table and file_name cannot both be provided." 
+ ) + expect_error( + cull_database_entries(table = 'test_table', outdir = 'test'), + "Please provide a table_name" + ) +}) + +test_that("`cull_database_entries()` able to correctly add logs to the output file", { + withr::with_dir(tempdir(), { + mockery::stub(cull_database_entries, 'PEcAn.DB::db.query', 'test_log') + dir <- getwd() + cull_database_entries(table = data.frame(id = 1), table_name = 'test', con = 1, outdir = dir) + expect_true(file.exists(paste0(dir, "/deletion_log_of_test"))) + file_data <- readLines(paste0(dir, "/deletion_log_of_test")) + expect_equal(grepl("test_log", file_data), c(TRUE, TRUE)) + }) +}) \ No newline at end of file diff --git a/base/qaqc/tests/testthat/test.find_formats_without_inputs.R b/base/qaqc/tests/testthat/test.find_formats_without_inputs.R new file mode 100644 index 00000000000..f61af2368b5 --- /dev/null +++ b/base/qaqc/tests/testthat/test.find_formats_without_inputs.R @@ -0,0 +1,14 @@ +test_that("`find_formats_without_inputs()` able to find formats with no input record",{ + format_command_mock <- data.frame(user_id = '2020', created_at = '2001-01-01', updated_at = '2010-01-01') + input_command_mock <- data.frame(format_id = '2000', user_id = '2021', created_at = '2002-01-02', updated_at = '2012-01-02') + mocked_res <- mockery::mock(input_command_mock, format_command_mock) + mockery::stub(find_formats_without_inputs, 'dplyr::tbl', mocked_res) + + res <- find_formats_without_inputs( + con = NULL, user_id_code = '2020', created_after = '2000-01-01', updated_after = '2009-01-01', created_before = '2002-01-01', updated_before = '2011-01-01' + ) + expect_equal( + res, + data.frame(id = '2020', created_at = '2001-01-01', updated_at = '2010-01-01', table_name = "formats") + ) +}) \ No newline at end of file diff --git a/base/qaqc/tests/testthat/test.find_inputs_without_formats.R b/base/qaqc/tests/testthat/test.find_inputs_without_formats.R new file mode 100644 index 00000000000..75c4b86d36b --- /dev/null +++ b/base/qaqc/tests/testthat/test.find_inputs_without_formats.R @@ -0,0 +1,13 @@ +test_that("`find_inputs_without_formats()` able to find inputs with no format records", { + input_command_mock <- data.frame(format_id = '2020', user_id = '2020', created_at = '2001-01-01', updated_at = '2010-01-01') + format_command_mock <- data.frame(user_id = '2021', created_at = '2002-01-02', updated_at = '2012-01-02') + mocked_res <- mockery::mock(input_command_mock, format_command_mock) + mockery::stub(find_inputs_without_formats, 'dplyr::tbl', mocked_res) + res <- find_inputs_without_formats( + con = NULL, user_id = '2020', created_after = '2000-01-01', updated_after = '2009-01-01', created_before = '2002-01-01', updated_before = '2011-01-01' + ) + expect_equal( + res, + data.frame(id = '2020', user_id = '2020',created_at = '2001-01-01', updated_at = '2010-01-01', table_name = "inputs") + ) +}) \ No newline at end of file diff --git a/base/qaqc/tests/testthat/test.get_table_column_names.R b/base/qaqc/tests/testthat/test.get_table_column_names.R new file mode 100644 index 00000000000..8a313365692 --- /dev/null +++ b/base/qaqc/tests/testthat/test.get_table_column_names.R @@ -0,0 +1,8 @@ +test_that("`get_table_column_names()` able to return the column names of a table as a list",{ + mocked_res <- mockery::mock(data.frame(head1 = 1, head2 = 2)) + mockery::stub(get_table_column_names, 'PEcAn.DB::db.query', mocked_res) + res <- get_table_column_names(table = data.frame(table_name = 'test_table'), con = 1) + args <- mockery::mock_args(mocked_res) + 
expect_equal(args[[1]][[1]], "SELECT * from test_table LIMIT 1") + expect_equal(res, list(test_table = c("head1", "head2"))) +}) \ No newline at end of file diff --git a/base/qaqc/tests/testthat/test.write_out_table.R b/base/qaqc/tests/testthat/test.write_out_table.R new file mode 100644 index 00000000000..fb4cedd6ed5 --- /dev/null +++ b/base/qaqc/tests/testthat/test.write_out_table.R @@ -0,0 +1,11 @@ +test_that("`write_out_table()` able to create and update output file with relevant data",{ + withr::with_dir(tempdir(), { + dir <- getwd() + write_out_table( + table = data.frame(id = 1, table_name = 'test'), table_name = 'test', relevant_table_columns = c(), outdir = dir + ) + expect_true(file.exists(paste0(dir, "/query_of_test"))) + file_data <- readLines(paste0(dir, "/query_of_test")) + expect_equal(grepl("test", file_data), c(FALSE, TRUE)) + }) +}) \ No newline at end of file diff --git a/base/qaqc/vignettes/function_relationships.Rmd b/base/qaqc/vignettes/function_relationships.Rmd index 1de4de04f7b..9c751fca9bb 100644 --- a/base/qaqc/vignettes/function_relationships.Rmd +++ b/base/qaqc/vignettes/function_relationships.Rmd @@ -6,17 +6,6 @@ vignette: > %\VignetteEngine{knitr::rmarkdown} --- - - This some code helps to visualize the interdependence of functions within PEcAn diff --git a/base/qaqc/vignettes/module_output.Rmd b/base/qaqc/vignettes/module_output.Rmd index 4ab3566b0ca..9d9ec773f2d 100644 --- a/base/qaqc/vignettes/module_output.Rmd +++ b/base/qaqc/vignettes/module_output.Rmd @@ -6,17 +6,6 @@ vignette: > %\VignetteEngine{knitr::rmarkdown} --- - - To get a better understanding on what files are created where, Rob created a workflow as an SVG diagram. You can find the diagram at http://isda.ncsa.illinois.edu/~kooper/EBI/workflow.svg diff --git a/base/remote/DESCRIPTION b/base/remote/DESCRIPTION index 26449ff1662..196d97967d2 100644 --- a/base/remote/DESCRIPTION +++ b/base/remote/DESCRIPTION @@ -1,8 +1,7 @@ Package: PEcAn.remote Type: Package Title: PEcAn Model Execution Utilities -Version: 1.7.2 -Date: 2021-10-04 +Version: 1.8.0.9000 Authors@R: c(person("David", "LeBauer", role = c("aut"), email = "dlebauer@email.arizona.edu"), person("Rob", "Kooper", role = c("aut", "cre"), @@ -17,12 +16,17 @@ Authors@R: c(person("David", "LeBauer", role = c("aut"), Description: This package contains utilities for communicating with and executing code on local and remote hosts. In particular, it has PEcAn-specific utilities for starting ecosystem model runs. Imports: + dplyr, + foreach, + furrr, PEcAn.logger, httr, jsonlite, urltools Suggests: + doSNOW, getPass, + mockery, testthat, tools, withr @@ -30,4 +34,4 @@ License: BSD_3_clause + file LICENSE Encoding: UTF-8 LazyData: true Roxygen: list(markdown = TRUE) -RoxygenNote: 7.2.3 +RoxygenNote: 7.3.2 diff --git a/base/remote/LICENSE b/base/remote/LICENSE index 9e38c2dc685..09ef35a60b4 100644 --- a/base/remote/LICENSE +++ b/base/remote/LICENSE @@ -1,29 +1,3 @@ -University of Illinois/NCSA Open Source License - -Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. 
- -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal with the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -- Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. -- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. -- Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - +YEAR: 2024 +COPYRIGHT HOLDER: PEcAn Project +ORGANIZATION: PEcAn Project, authors affiliations diff --git a/base/remote/NAMESPACE b/base/remote/NAMESPACE index 751b2c56519..0f8751522cf 100644 --- a/base/remote/NAMESPACE +++ b/base/remote/NAMESPACE @@ -4,8 +4,10 @@ export(check_model_run) export(fqdn) export(is.localhost) export(kill.tunnel) +export(merge_job_files) export(open_tunnel) export(qsub_get_jobid) +export(qsub_parallel) export(qsub_run_finished) export(rabbitmq_get_message) export(rabbitmq_post_message) @@ -20,3 +22,5 @@ export(start_qsub) export(start_rabbitmq) export(start_serial) export(test_remote) +importFrom(dplyr,"%>%") +importFrom(foreach,"%dopar%") diff --git a/base/remote/NEWS.md b/base/remote/NEWS.md index 92e48e088db..7450081eac2 100644 --- a/base/remote/NEWS.md +++ b/base/remote/NEWS.md @@ -1,5 +1,9 @@ -# PEcAn.remote 1.7.2.9000 +# PEcAn.remote 1.8.0.9000 +* PEcAn.remote is now distributed under the BSD 3-clause license instead of the NCSA Open Source license. + + +# PEcAn.remote 1.8.0 ## Added diff --git a/base/remote/R/fqdn.R b/base/remote/R/fqdn.R index 1830917adc6..38b12cd5e68 100644 --- a/base/remote/R/fqdn.R +++ b/base/remote/R/fqdn.R @@ -1,12 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - ##' Returns the fully qualified hostname. This is potentially different from `Sys.info()['nodename']` ##' which can return just the hostname part and not the domain as well. For example the machine ##' pecan.ncsa.illinois.edu will return just that as fqdn but only pecan for hostname. 
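The two helpers introduced below are designed to compose: merge_job_files() batches per-run job.sh scripts into larger scripts, and qsub_parallel() submits them and polls for completion. A minimal usage sketch, assuming a settings object with configured rundir and host sections as described in the diffs below (the prefix value is illustrative):

# hypothetical invocation of the two new PEcAn.remote helpers
merged <- PEcAn.remote::merge_job_files(settings, jobs_per_file = 10)
PEcAn.remote::qsub_parallel(settings, files = merged, prefix = "sipnet.out", sleep = 10)
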
diff --git a/base/remote/R/merge_job_files.R b/base/remote/R/merge_job_files.R
new file mode 100644
index 00000000000..6634b398bce
--- /dev/null
+++ b/base/remote/R/merge_job_files.R
@@ -0,0 +1,41 @@
+#' Merge multiple job.sh files into one larger file.
+#'
+#' @param settings PEcAn.settings object with host section.
+#' @param jobs_per_file the number of job.sh files to merge into each output file.
+#' @param outdir output directory of merged job files.
+#'
+#' @return vector of the newly created filenames
+#' @export
+#' @author Dongchen Zhang
+#'
+merge_job_files <- function(settings, jobs_per_file = 10, outdir = NULL){
+  # default outdir
+  if(is.null(outdir)){
+    outdir <- file.path(settings$rundir, "merged_jobs")
+  }
+  # create folder or delete previous job files.
+  if(dir.exists(outdir)){
+    unlink(list.files(outdir, recursive = T, full.names = T))
+  }else{
+    dir.create(outdir)
+  }
+  # merge job files.
+  run_list <- readLines(con = file.path(settings$rundir, "runs.txt"))
+  job_file_paths <- list.files(settings$host$rundir, pattern = "*.sh", recursive = T, full.names = T)
+  i <- 0
+  files <- c()
+  while (i < length(job_file_paths)) {
+    jobs <- c()
+    for (j in 1:jobs_per_file) {
+      if((i+j) > length(job_file_paths)){
+        break
+      }
+      jobs <- c(jobs, readLines(job_file_paths[i+j]))
+    }
+    writeLines(jobs, con = file.path(outdir, paste0("job_", i,".sh")))
+    Sys.chmod(file.path(outdir, paste0("job_", i,".sh")))
+    files <- c(files, file.path(outdir, paste0("job_", i,".sh")))
+    i <- i + jobs_per_file
+  }
+  files
+}
diff --git a/base/remote/R/qsub_parallel.R b/base/remote/R/qsub_parallel.R
new file mode 100644
index 00000000000..064672f11a1
--- /dev/null
+++ b/base/remote/R/qsub_parallel.R
@@ -0,0 +1,179 @@
+#' qsub_parallel
+#'
+#' @param settings pecan settings object
+#' @param files allows submitting jobs based on job.sh file paths.
+#' @param prefix used for detecting if jobs are completed or not.
+#' @param sleep time (in seconds) that we wait each time for the jobs to be completed.
+#' @param hybrid A Boolean argument deciding the way of detecting job completion. If it's TRUE then we will detect both the outputted files and job ids on the server. If it's FALSE then we will only detect the job ids on the server.
+#' @export
+#' @examples
+#' \dontrun{
+#'   qsub_parallel(settings)
+#' }
+#' @author Dongchen Zhang
+#'
+#' @importFrom foreach %dopar%
+#' @importFrom dplyr %>%
+qsub_parallel <- function(settings, files = NULL, prefix = "sipnet.out", sleep = 10, hybrid = TRUE) {
+  if("try-error" %in% class(try(find.package("doSNOW"), silent = T))){
+    PEcAn.logger::logger.info("Package doSNOW is not installed! Please install it and rerun the function!")
+    return(0)
+  }
+  #declare variables within foreach section
+  run <- NULL
+  folder <- NULL
+  run_list <- readLines(con = file.path(settings$rundir, "runs.txt"))
+  is_local <- PEcAn.remote::is.localhost(settings$host)
+  is_qsub <- !is.null(settings$host$qsub)
+  is_rabbitmq <- !is.null(settings$host$rabbitmq)
+  # loop through runs and either call start run, or launch job on remote machine
+  # submit jobs in parallel
+  cores <- parallel::detectCores()
+  cl <- parallel::makeCluster(cores)
+  doSNOW::registerDoSNOW(cl)
+  #progress bar
+  pb <- utils::txtProgressBar(min=1, max=ifelse(is.null(files), length(run_list), length(files)), style=3)
+  progress <- function(n) utils::setTxtProgressBar(pb, n)
+  opts <- list(progress=progress)
+  PEcAn.logger::logger.info("Submitting jobs!")
+  # if we want to submit jobs separately.
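+  #the host qsub template (e.g. "qsub -N @NAME@ -o @STDOUT@ -e @STDERR@" -- an
+  #illustrative value) is filled in for each run: placeholders are substituted
+  #with the run id and per-run log paths, then the string is split on unquoted
+  #spaces into the command plus its arguments before submission.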
+  if(is.null(files)){
+    if (is_qsub) {
+      jobids <- foreach::foreach(run = run_list, .packages="Kendall", .options.snow=opts, settings = rep(settings, length(run_list))) %dopar% {
+        run_id_string <- format(run, scientific = FALSE)
+        qsub <- settings$host$qsub
+        qsub <- gsub("@NAME@", paste0("PEcAn-", run_id_string), qsub)
+        qsub <- gsub("@STDOUT@", file.path(settings$host$outdir, run_id_string, "stdout.log"), qsub)
+        qsub <- gsub("@STDERR@", file.path(settings$host$outdir, run_id_string, "stderr.log"), qsub)
+        qsub <- strsplit(qsub, " (?=([^\"']*\"[^\"']*\")*[^\"']*$)", perl = TRUE)
+        # start the actual model run
+        cmd <- qsub[[1]]
+        if(PEcAn.remote::is.localhost(settings$host)){
+          out <- system2(cmd, file.path(settings$host$rundir, run_id_string, "job.sh"), stdout = TRUE, stderr = TRUE)
+        }else{
+          out <- PEcAn.remote::remote.execute.cmd(settings$host, cmd, file.path(settings$host$rundir, run_id_string, "job.sh"), stderr = TRUE)
+        }
+        jobid <- PEcAn.remote::qsub_get_jobid(
+          out = out[length(out)],
+          qsub.jobid = settings$host$qsub.jobid,
+          stop.on.error = TRUE)
+        return(jobid)
+      }
+    } else if (is_rabbitmq) {
+      out <- foreach::foreach(run = run_list, .packages="Kendall", .options.snow=opts, settings = rep(settings, length(run_list))) %dopar% {
+        run_id_string <- format(run, scientific = FALSE)
+        PEcAn.remote::start_rabbitmq(file.path(settings$host$rundir, run_id_string), settings$host$rabbitmq$uri, settings$host$rabbitmq$queue)
+      }
+    }
+  }else{
+    # if we want to submit merged job files.
+    std_out <- file.path(settings$host$outdir, "merged_stdout")
+    if(!dir.exists(std_out)){
+      dir.create(std_out)
+    }else{
+      unlink(list.files(std_out, recursive = T, full.names = T))
+    }
+    jobids <- foreach::foreach(file = files, .packages="Kendall", .options.snow=opts, settings = rep(settings, length(files))) %dopar% {
+      qsub <- settings$host$qsub
+      base_name <- basename(file)
+      num <- gsub("\\D", "", base_name)
+      name <- paste0("SDA", num)
+      qsub <- gsub("@NAME@", name, qsub)
+      qsub <- gsub("@STDOUT@", file.path(std_out, paste0("stdout", num, ".log")), qsub)
+      qsub <- gsub("@STDERR@", file.path(std_out, paste0("stderr", num, ".log")), qsub)
+      qsub <- strsplit(qsub, " (?=([^\"']*\"[^\"']*\")*[^\"']*$)", perl = TRUE)
+      cmd <- qsub[[1]]
+      if(PEcAn.remote::is.localhost(settings$host)){
+        out <- system2(cmd, file, stdout = TRUE, stderr = TRUE)
+      }else{
+        out <- PEcAn.remote::remote.execute.cmd(settings$host, cmd, file, stderr = TRUE)
+      }
+      jobid <- PEcAn.remote::qsub_get_jobid(
+        out = out[length(out)],
+        qsub.jobid = settings$host$qsub.jobid,
+        stop.on.error = TRUE)
+      return(jobid)
+    }
+  }
+  PEcAn.logger::logger.info("Jobs submitted!")
+  #check if jobs are completed
+  PEcAn.logger::logger.info("Checking the qsub job status!")
+  PEcAn.logger::logger.info(paste0("Checking the file ", prefix))
+  ## setup progressbar
+  folders <- file.path(settings$host$outdir, run_list)
+  L_folder <- length(folders)
+  pb <- utils::txtProgressBar(min = 0, max = L_folder, style = 3)
+  #here we not only detect whether the target files have been generated,
+  #we also detect whether the jobs still exist on the server.
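+  #the loops below poll every `sleep` seconds: a run counts as completed when
+  #the `prefix` file (e.g. sipnet.out) appears in its output folder and/or when
+  #its job id is no longer reported by the scheduler via qsub_run_finished().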
+  if (is_rabbitmq) {
+    while ((L_folder - length(folders)) < L_folder) {
+      Sys.sleep(sleep)
+      completed_folders <- foreach::foreach(folder = folders) %dopar% {
+        if(file.exists(file.path(folder, prefix))){
+          return(folder)
+        }
+      } %>% unlist()
+      folders <- folders[which(!folders %in% completed_folders)]
+      pbi <- L_folder - length(folders)
+      utils::setTxtProgressBar(pb, pbi)
+    }
+  } else {
+    L_jobid <- length(jobids)
+    pb1 <- utils::txtProgressBar(min = 0, max = L_jobid, style = 3)
+    if (hybrid) {
+      while ((L_folder - length(folders)) < L_folder &
+             (L_jobid - length(jobids)) < L_jobid) {
+        Sys.sleep(sleep)
+        completed_folders <- foreach::foreach(folder = folders) %dopar% {
+          if(file.exists(file.path(folder, prefix))){
+            return(folder)
+          }
+        } %>% unlist()
+        folders <- folders[which(!folders %in% completed_folders)]
+
+        #or we can try to detect whether the jobs are still on the server.
+        #specify the host and qstat arguments for the future_map function.
+        host <- settings$host
+        qstat <- host$qstat
+        completed_jobs <- jobids %>% furrr::future_map(function(id) {
+          if (PEcAn.remote::qsub_run_finished(
+            run = id,
+            host = host,
+            qstat = qstat)) {
+            return(id)
+          }
+        }) %>% unlist()
+        jobids <- jobids[which(!jobids %in% completed_jobs)]
+
+        #compare the two progress measures and set the maximum for the progress bar.
+        pbi <- L_folder - length(folders)
+        utils::setTxtProgressBar(pb, pbi)
+      }
+    } else {
+      #special case that only detects the job ids on the server.
+      while ((L_jobid - length(jobids)) < L_jobid) {
+        #detect whether the jobs are still on the server.
+        #specify the host and qstat arguments for the future_map function.
+        Sys.sleep(sleep)
+        host <- settings$host
+        qstat <- host$qstat
+        completed_jobs <- jobids %>% furrr::future_map(function(id) {
+          if (PEcAn.remote::qsub_run_finished(
+            run = id,
+            host = host,
+            qstat = qstat)) {
+            return(id)
+          }
+        }) %>% unlist()
+        jobids <- jobids[which(!jobids %in% completed_jobs)]
+
+        #update the progress bar with the number of completed jobs.
+        pbi1 <- L_jobid - length(jobids)
+        utils::setTxtProgressBar(pb1, pbi1)
+      }
+    }
+  }
+  close(pb)
+  parallel::stopCluster(cl)
+  PEcAn.logger::logger.info("Completed!")
+}
\ No newline at end of file
diff --git a/base/remote/R/remote.execute.R.R b/base/remote/R/remote.execute.R.R
index cb7a74e878b..1ea80b8d031 100644
--- a/base/remote/R/remote.execute.R.R
+++ b/base/remote/R/remote.execute.R.R
@@ -6,10 +6,11 @@
 #'
 #' @title Execute command remotely
 #' @param script the script to be invoked, as a list of commands.
-#' @param args a character vector of arguments to command.
 #' @param host settings host list
 #' @param user the username to use for remote login
 #' @param verbose should the output be printed to the console
+#' @param R Path to the R executable or binary file.
+#' @param scratchdir Path to the scratch directory for temporary files during remote execution.
 #' @return the captured output of the command (both stdout and stderr)
 #' @author Rob Kooper
 #' @export
diff --git a/base/remote/R/remote.execute.cmd.R b/base/remote/R/remote.execute.cmd.R
index 2720c93ce7c..65670ea416b 100644
--- a/base/remote/R/remote.execute.cmd.R
+++ b/base/remote/R/remote.execute.cmd.R
@@ -5,7 +5,7 @@
 #' machine it will execute the command locally without ssh.
 #'
 #' @title Execute command remotely
-#' @param command the system command to be invoked, as a character string.
+#' @param cmd the system command to be invoked, as a character string.
diff --git a/base/remote/R/remote.execute.R.R b/base/remote/R/remote.execute.R.R
index cb7a74e878b..1ea80b8d031 100644
--- a/base/remote/R/remote.execute.R.R
+++ b/base/remote/R/remote.execute.R.R
@@ -6,10 +6,11 @@
 #'
 #' @title Execute command remotely
 #' @param script the script to be invoked, as a list of commands.
-#' @param args a character vector of arguments to command.
 #' @param host settings host list
 #' @param user the username to use for remote login
 #' @param verbose should the output be printed to the console
+#' @param R Path to the R executable or binary file.
+#' @param scratchdir Path to the scratch directory for temporary files during remote execution.
 #' @return the captured output of the command (both stdout and stderr)
 #' @author Rob Kooper
 #' @export
diff --git a/base/remote/R/remote.execute.cmd.R b/base/remote/R/remote.execute.cmd.R
index 2720c93ce7c..65670ea416b 100644
--- a/base/remote/R/remote.execute.cmd.R
+++ b/base/remote/R/remote.execute.cmd.R
@@ -5,7 +5,7 @@
 #' machine it will execute the command locally without ssh.
 #'
 #' @title Execute command remotely
-#' @param command the system command to be invoked, as a character string.
+#' @param cmd the system command to be invoked, as a character string.
 #' @param host host structure to execute command on
 #' @param args a character vector of arguments to command.
 #' @param stderr should stderr be returned as well.
diff --git a/base/remote/R/start.model.runs.R b/base/remote/R/start.model.runs.R
index 623acab73ba..b3ee6e38290 100644
--- a/base/remote/R/start.model.runs.R
+++ b/base/remote/R/start.model.runs.R
@@ -1,12 +1,3 @@
-##-------------------------------------------------------------------------------
-## Copyright (c) 2012 University of Illinois, NCSA. All rights reserved. This
-## program and the accompanying materials are made available under the terms of
-## the University of Illinois/NCSA Open Source License which accompanies this
-## distribution, and is available at
-## http://opensource.ncsa.illinois.edu/license.html
-##-------------------------------------------------------------------------------
-
-
 ##' Start selected ecosystem model runs within PEcAn workflow
 ##'
 ##' DEFUNCT: This function has been moved to PEcAn.workflow::start_model_runs;
diff --git a/base/remote/R/test_remote.R b/base/remote/R/test_remote.R
index 863451bf976..7370144863a 100644
--- a/base/remote/R/test_remote.R
+++ b/base/remote/R/test_remote.R
@@ -1,6 +1,8 @@
 #' Test remote execution
 #'
 #' @inheritParams remote.execute.cmd
+#'
+#' @param ... additional arguments.
 #'
 #' @return `TRUE` is remote execution is successful.
 #' If unsuccessful, depends on the value of `stderr`.
diff --git a/base/remote/R/version.R b/base/remote/R/version.R
new file mode 100644
index 00000000000..0e58d885272
--- /dev/null
+++ b/base/remote/R/version.R
@@ -0,0 +1,3 @@
+# Set at package install time, used by pecan.all::pecan_version()
+# to identify development versions of packages
+.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown")
diff --git a/base/remote/man/merge_job_files.Rd b/base/remote/man/merge_job_files.Rd
new file mode 100644
index 00000000000..31a39859469
--- /dev/null
+++ b/base/remote/man/merge_job_files.Rd
@@ -0,0 +1,24 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/merge_job_files.R
+\name{merge_job_files}
+\alias{merge_job_files}
+\title{Merge multiple job.sh files into one larger file.}
+\usage{
+merge_job_files(settings, jobs_per_file = 10, outdir = NULL)
+}
+\arguments{
+\item{settings}{PEcAn.settings object with host section.}
+
+\item{jobs_per_file}{the number of job.sh files to bundle into each merged file.}
+
+\item{outdir}{output directory of merged job files.}
+}
+\value{
+vector of the newly created filenames
+}
+\description{
+Merge multiple job.sh files into one larger file.
+}
+\author{
+Dongchen Zhang
+}
diff --git a/base/remote/man/qsub_parallel.Rd b/base/remote/man/qsub_parallel.Rd
new file mode 100644
index 00000000000..274104b8139
--- /dev/null
+++ b/base/remote/man/qsub_parallel.Rd
@@ -0,0 +1,36 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/qsub_parallel.R
+\name{qsub_parallel}
+\alias{qsub_parallel}
+\title{qsub_parallel}
+\usage{
+qsub_parallel(
+  settings,
+  files = NULL,
+  prefix = "sipnet.out",
+  sleep = 10,
+  hybrid = TRUE
+)
+}
+\arguments{
+\item{settings}{pecan settings object}
+
+\item{files}{optional paths of (merged) job.sh files; if supplied, jobs are submitted based on these file paths.}
+
+\item{prefix}{name of the output file used to detect whether a job has completed.}
+
+\item{sleep}{time (in seconds) to wait between checks for job completion.}
+
+\item{hybrid}{Logical: how to detect job completion. If TRUE, detect both the output files and the job IDs on the server; if FALSE, detect only the job IDs on the server.}
+}
+\description{
+Submit PEcAn model runs in parallel via qsub (or RabbitMQ) and wait for them to complete.
+}
+\examples{
+\dontrun{
+  qsub_parallel(settings)
+}
+}
+\author{
+Dongchen Zhang
+}
diff --git a/base/remote/man/remote.execute.R.Rd b/base/remote/man/remote.execute.R.Rd
index 08a472fc5ea..5c47303f527 100644
--- a/base/remote/man/remote.execute.R.Rd
+++ b/base/remote/man/remote.execute.R.Rd
@@ -22,7 +22,9 @@ remote.execute.R(
 \item{verbose}{should the output be printed to the console}
 
-\item{args}{a character vector of arguments to command.}
+\item{R}{Path to the R executable or binary file.}
+
+\item{scratchdir}{Path to the scratch directory for temporary files during remote execution.}
 
 }
 \value{
 the captured output of the command (both stdout and stderr)
diff --git a/base/remote/man/remote.execute.cmd.Rd b/base/remote/man/remote.execute.cmd.Rd
index c45d7974587..d9a51e2c863 100644
--- a/base/remote/man/remote.execute.cmd.Rd
+++ b/base/remote/man/remote.execute.cmd.Rd
@@ -9,11 +9,11 @@ remote.execute.cmd(host, cmd, args = character(), stderr = FALSE)
 \arguments{
 \item{host}{host structure to execute command on}
 
+\item{cmd}{the system command to be invoked, as a character string.}
+
 \item{args}{a character vector of arguments to command.}
 
 \item{stderr}{should stderr be returned as well.}
-
-\item{command}{the system command to be invoked, as a character string.}
 }
 \value{
 the captured output of the command (both stdout and stderr)
diff --git a/base/remote/man/test_remote.Rd b/base/remote/man/test_remote.Rd
index 2b7319665da..f8244b0e6c2 100644
--- a/base/remote/man/test_remote.Rd
+++ b/base/remote/man/test_remote.Rd
@@ -10,6 +10,8 @@ test_remote(host, stderr = TRUE, ...)
 \item{host}{host structure to execute command on}
 
 \item{stderr}{should stderr be returned as well.}
+
+\item{...}{additional arguments.}
 }
 \value{
 \code{TRUE} is remote execution is successful.
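Taken together, the two new man pages above describe a workflow where individual job scripts are first merged and the merged batches are then submitted. A minimal usage sketch, assuming `settings` is a fully populated PEcAn settings object for a qsub-capable host (the settings file and output directory here are hypothetical):

library(PEcAn.settings)
library(PEcAn.remote)
settings <- read.settings("pecan.xml")  # hypothetical settings file
# merge individual job.sh files, ten jobs per merged file
merged <- merge_job_files(settings, jobs_per_file = 10, outdir = "/tmp/merged_jobs")
# submit the merged files and poll every 10 seconds until all jobs finish
qsub_parallel(settings, files = merged, prefix = "sipnet.out", sleep = 10)
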
diff --git a/base/remote/tests/testthat/test.check_model_run.R b/base/remote/tests/testthat/test.check_model_run.R
new file mode 100644
index 00000000000..592c2209e7f
--- /dev/null
+++ b/base/remote/tests/testthat/test.check_model_run.R
@@ -0,0 +1,10 @@
+test_that("`check_model_run()` gives correct output for the passed `out` value", {
+  # failure
+  expect_error(
+    check_model_run(c("ERROR IN MODEL RUN")),
+    "Model run aborted with the following error:\nERROR IN MODEL RUN"
+  )
+
+  # success
+  expect_equal(check_model_run(c("SUCCESS")), TRUE)
+})
\ No newline at end of file
diff --git a/base/remote/tests/testthat/test.kill.tunnel.R b/base/remote/tests/testthat/test.kill.tunnel.R
new file mode 100644
index 00000000000..c75c81f1ebe
--- /dev/null
+++ b/base/remote/tests/testthat/test.kill.tunnel.R
@@ -0,0 +1,20 @@
+test_that("`kill.tunnel()` reads the correct PID files and logs the correct messages when killing the exe and data tunnels", {
+  withr::with_dir(tempdir(), {
+    mockery::stub(kill.tunnel, 'tools::pskill', TRUE)
+    mockery::stub(kill.tunnel, 'dirname', getwd())
+
+    # Kill tunnel to executable
+    settings <- list(host = list(tunnel = getwd()))
+    file_path <- file.path(getwd(), "pid")
+    file.create(file_path)
+    writeLines("1234", file_path)
+    expect_output(kill.tunnel(settings), "Killing tunnel with PID 1234")
+
+    # Kill tunnel to data
+    settings <- list(host = list(data_tunnel = getwd()))
+    file_path <- file.path(getwd(), "pid")
+    file.create(file_path)
+    writeLines("3456", file_path)
+    expect_output(kill.tunnel(settings), "Killing tunnel with PID 3456")
+  })
+})
\ No newline at end of file
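The tests above (and most of the tests added below) rely on the same stubbing pattern: `mockery::stub()` swaps out a dependency inside the function under test so the assertions never touch the file system or network. A self-contained sketch of the pattern, using a hypothetical function under test:

library(testthat)
library(mockery)

risky_read <- function() stop("would touch the file system")
summarize <- function() paste("got", risky_read())

test_that("stubbed dependency is used instead of the real one", {
  # within summarize(), calls to risky_read() now return a canned value
  mockery::stub(summarize, "risky_read", "fake-data")
  expect_equal(summarize(), "got fake-data")
})
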
diff --git a/base/remote/tests/testthat/test.localhost.R b/base/remote/tests/testthat/test.localhost.R
index 7d494673ce4..801dc007f9f 100644
--- a/base/remote/tests/testthat/test.localhost.R
+++ b/base/remote/tests/testthat/test.localhost.R
@@ -1,11 +1,3 @@
-#-------------------------------------------------------------------------------
-# Copyright (c) 2012 University of Illinois, NCSA.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the
-# University of Illinois/NCSA Open Source License
-# which accompanies this distribution, and is available at
-# http://opensource.ncsa.illinois.edu/license.html
-#-------------------------------------------------------------------------------
 test_that('is.localhost works', {
   expect_true(is.localhost("localhost"))
   expect_true(is.localhost(fqdn()))
diff --git a/base/remote/tests/testthat/test.rabbitmq.R b/base/remote/tests/testthat/test.rabbitmq.R
new file mode 100644
index 00000000000..2d597a85e03
--- /dev/null
+++ b/base/remote/tests/testthat/test.rabbitmq.R
@@ -0,0 +1,79 @@
+test_that("`rabbitmq_parse_uri()` is able to parse the rabbitmq uri into its component variables", {
+  uri <- "amqp://guest:guest@localhost:15672/myvhost"
+  result <- rabbitmq_parse_uri(uri)
+  expect_equal(result$url, "http://localhost:15672/")
+  expect_equal(result$vhost$path, c("myvhost"))
+  expect_equal(result$username, "guest")
+  expect_equal(result$password, "guest")
+})
+
+test_that("`rabbitmq_send_message()` is able to return content if the status code is between 200 and 299", {
+  mockery::stub(rabbitmq_send_message, 'httr::GET', data.frame(status_code = 200))
+  mockery::stub(rabbitmq_send_message, 'httr::content', "test")
+  res <- rabbitmq_send_message(url = 'test/', auth = 'test', body = 'test', action = "GET")
+  expect_equal(res, "test")
+})
+
+test_that("`rabbitmq_send_message()` throws error where it should", {
+  PEcAn.logger::logger.setUseConsole(TRUE, FALSE)
+  on.exit(PEcAn.logger::logger.setUseConsole(TRUE, TRUE), add = TRUE)
+
+  # errors if the action specified is unknown
+  expect_output(
+    rabbitmq_send_message(url = 'test/', auth = 'test', body = 'test', action = "TEST"),
+    "uknown action TEST"
+  )
+
+  # errors if the status code is 401 (username/password may be incorrect)
+  mockery::stub(rabbitmq_send_message, 'httr::GET', data.frame(status_code = 401))
+  expect_output(
+    rabbitmq_send_message(url = 'test/', auth = 'test', body = 'test', action = "GET"),
+    "error sending message to rabbitmq"
+  )
+
+  # errors if the status code is outside of 200-299 and not 401
+  mockery::stub(rabbitmq_send_message, 'httr::GET', data.frame(status_code = 501))
+  mockery::stub(rabbitmq_send_message, 'httr::content', "test")
+  expect_output(
+    rabbitmq_send_message(url = 'test/', auth = 'test', body = 'test', action = "GET"),
+    "error sending message to rabbitmq \\[ 501 \\]"
+  )
+})
+
+test_that("`rabbitmq_create_queue()` handles both the case where the queue already exists and where it must be created", {
+  mocked_res <- mockery::mock(NA, 'test')
+  mockery::stub(rabbitmq_create_queue, 'rabbitmq_send_message', mocked_res)
+  res <- rabbitmq_create_queue(url = 'test', auth = 'test', vhost = 'test', queue = 'test')
+  args <- mockery::mock_args(mocked_res)
+  expect_equal(res, TRUE)
+  expect_equal(args[[1]][[4]], 'GET')
+  expect_equal(args[[2]][[4]], 'PUT')
+})
+
+test_that("`rabbitmq_post_message()` passes the right params to send a message to rabbitmq", {
+  mocked_res <- mockery::mock('test')
+  mockery::stub(rabbitmq_post_message, 'rabbitmq_send_message', mocked_res)
+  mockery::stub(rabbitmq_post_message, 'rabbitmq_create_queue', TRUE)
+  res <- rabbitmq_post_message(uri = 'amqp://guest:guest@localhost:15672/myvhost', queue = 'test_queue', message = 'test_message')
+  args <- mockery::mock_args(mocked_res)
+  expect_equal(res, 'test')
+  expect_equal(args[[1]][[1]], 'http://localhost:15672/api/exchanges/myvhost//publish')
+  expect_equal(args[[1]][[3]]$properties$delivery_mode, 2)
+  expect_equal(args[[1]][[3]]$routing_key, 'test_queue')
+  expect_equal(args[[1]][[3]]$payload, jsonlite::toJSON('test_message', auto_unbox = TRUE))
+  expect_equal(args[[1]][[3]]$payload_encoding, 'string')
+  expect_equal(args[[1]][[4]], 'POST')
+})
+
+test_that("`rabbitmq_get_message()` passes the right params to get a message from rabbitmq", {
+  mocked_res <- mockery::mock(NA)
+  mockery::stub(rabbitmq_get_message, 'rabbitmq_send_message', mocked_res)
+  mockery::stub(rabbitmq_get_message, 'rabbitmq_create_queue', TRUE)
+  res <- rabbitmq_get_message(uri = 'amqp://guest:guest@localhost:15672/myvhost', queue = 'test_queue')
+  args <- mockery::mock_args(mocked_res)
+  expect_equal(args[[1]][[1]], 'http://localhost:15672/api/queues/myvhost/test_queue/get')
+  expect_equal(args[[1]][[3]]$count, 1)
+  expect_equal(args[[1]][[3]]$ackmode, 'ack_requeue_false')
+  expect_equal(args[[1]][[3]]$encoding, 'auto')
+  expect_equal(args[[1]][[4]], 'POST')
+})
\ No newline at end of file
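For orientation, the first test above encodes the URI-to-API mapping: the AMQP connection string is translated into the RabbitMQ management API endpoint plus credentials. A sketch of the expected round trip, restating the test's own (hypothetical) values; as in the test file, the helper is called unqualified because the tests run inside the package namespace:

uri <- "amqp://guest:guest@localhost:15672/myvhost"
parts <- rabbitmq_parse_uri(uri)
# Per the expectations above:
# parts$url        == "http://localhost:15672/"
# parts$vhost$path == "myvhost"
# parts$username   == "guest"; parts$password == "guest"
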
diff --git a/base/remote/tests/testthat/test.remote.copy.from.R b/base/remote/tests/testthat/test.remote.copy.from.R
new file mode 100644
index 00000000000..635b4ce5d84
--- /dev/null
+++ b/base/remote/tests/testthat/test.remote.copy.from.R
@@ -0,0 +1,12 @@
+test_that("`remote.copy.from()` constructs the correct system command to be executed for doing the copy", {
+  mocked_res <- mockery::mock(0)
+  mockery::stub(remote.copy.from, 'system2', mocked_res)
+  mockery::stub(remote.copy.from, 'file.exists', TRUE)
+  remote.copy.from(host = data.frame(name = 'pecan', tunnel = 'test_tunnel'), src = 'tmp/', dst = 'tmp/', delete = TRUE)
+  args <- mockery::mock_args(mocked_res)
+  expect_equal(args[[1]][[1]], 'rsync')
+  expect_equal(
+    args[[1]][[2]],
+    shQuote(c("-az", "-q", "--delete", "-e", "ssh -o ControlPath=\"test_tunnel\"", "pecan:tmp/", "tmp/"))
+  )
+})
\ No newline at end of file
diff --git a/base/remote/tests/testthat/test.start_qsub.R b/base/remote/tests/testthat/test.start_qsub.R
new file mode 100644
index 00000000000..dc54a6a97d9
--- /dev/null
+++ b/base/remote/tests/testthat/test.start_qsub.R
@@ -0,0 +1,11 @@
+test_that("`start_qsub()` is able to correctly construct the command to be executed remotely to start qsub runs", {
+  mocked_res <- mockery::mock(0)
+  mockery::stub(start_qsub, 'remote.execute.cmd', mocked_res)
+  res <- start_qsub(1, "qsub -N @NAME@ -o @STDOUT@ -e @STDERR@", "test_rundir", "pecan", "test_host_rundir", "test_host_outdir", "test_stdout_log", "test_stderr_log", "test_job_script")
+  args <- mockery::mock_args(mocked_res)
+  expect_equal(args[[1]][[1]], 'pecan')
+  expect_equal(args[[1]][[2]], c('qsub', '-N', 'PEcAn-1', '-o', 'test_host_outdir/1/test_stdout_log', '-e', 'test_host_outdir/1/test_stderr_log'))
+  expect_equal(args[[1]][[3]][[1]], 'test_host_rundir/1/test_job_script')
+  expect_equal(args[[1]]$stderr, TRUE)
+  expect_equal(res, 0)
+})
\ No newline at end of file
diff --git a/base/remote/tests/testthat/test.start_rabbitmq.R b/base/remote/tests/testthat/test.start_rabbitmq.R
new file mode 100644
index 00000000000..85156447903
--- /dev/null
+++ b/base/remote/tests/testthat/test.start_rabbitmq.R
@@ -0,0 +1,13 @@
+test_that("`start_rabbitmq()` is able to correctly read the environment variables and send the desired values to rabbitmq_post_message", {
+  withr::with_envvar(c("RABBITMQ_PREFIX" = "prefix", "RABBITMQ_PORT" = "3000"), {
+    mocked_res <- mockery::mock(TRUE)
+    mockery::stub(start_rabbitmq, 'rabbitmq_post_message', mocked_res)
+    res <- start_rabbitmq('test_folder', 'test_uri', 'test_queue')
+    args <- mockery::mock_args(mocked_res)
+    expect_equal(args[[1]][[1]], 'test_uri')
+    expect_equal(args[[1]][[2]], 'test_queue')
+    expect_equal(args[[1]][[3]], list(folder = 'test_folder'))
+    expect_equal(args[[1]][[4]], 'prefix')
+    expect_equal(args[[1]][[5]], '3000')
+  })
+})
\ No newline at end of file
diff --git a/base/remote/tests/testthat/test.start_serial.R b/base/remote/tests/testthat/test.start_serial.R
new file mode 100644
index 00000000000..8f0e558515b
--- /dev/null
+++ b/base/remote/tests/testthat/test.start_serial.R
@@ -0,0 +1,9 @@
+test_that("`start_serial()` is able to pass the desired parameters to the remote command that starts model execution in serial mode", {
+  mocked_res <- mockery::mock(TRUE)
+  mockery::stub(start_serial, 'remote.execute.cmd', mocked_res)
+  res <- start_serial('test_run', 'pecan', 'test_rundir', 'test_host_rundir', 'test_job_script')
+  args <- mockery::mock_args(mocked_res)
+  expect_equal(args[[1]][[1]], 'pecan')
+  expect_equal(args[[1]][[2]], 'test_host_rundir/test_run/test_job_script')
+  expect_equal(res, TRUE)
+})
\ No newline at end of file
diff --git a/base/settings/DESCRIPTION b/base/settings/DESCRIPTION
index 009f84fb437..f932a6398cb 100644
--- a/base/settings/DESCRIPTION
+++ b/base/settings/DESCRIPTION
@@ -5,8 +5,7 @@ Authors@R: c(person("David", "LeBauer", role = c("aut", "cre"),
     person("Rob", "Kooper", role = c("aut"), email = "kooper@illinois.edu"),
     person("University of Illinois, NCSA", role = c("cph")))
-Version: 1.7.2
-Date: 2021-10-04
+Version: 1.8.0.9000
 License: BSD_3_clause + file LICENSE
 Copyright: Authors
 LazyLoad: yes
@@ -25,8 +24,9 @@ Imports:
     XML (>= 3.98-1.3),
     optparse
 Suggests:
+    mockery,
     testthat (>= 2.0.0),
     withr
 Encoding: UTF-8
-RoxygenNote: 7.2.3
+RoxygenNote: 7.3.2
 Roxygen: list(markdown = TRUE)
diff --git a/base/settings/LICENSE b/base/settings/LICENSE
index 5a9e44128f1..09ef35a60b4 100644
--- a/base/settings/LICENSE
+++ b/base/settings/LICENSE
@@ -1,34 +1,3 @@
-## This is the master copy of the PEcAn License
-
-University of Illinois/NCSA Open Source License
-
-Copyright (c) 2012, University of Illinois, NCSA. All rights reserved.
-
-PEcAn project
-www.pecanproject.org
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal with the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-- Redistributions of source code must retain the above copyright
-  notice, this list of conditions and the following disclaimers.
-- Redistributions in binary form must reproduce the above copyright
-  notice, this list of conditions and the following disclaimers in the
-  documentation and/or other materials provided with the distribution.
-- Neither the names of University of Illinois, NCSA, nor the names
-  of its contributors may be used to endorse or promote products
-  derived from this Software without specific prior written permission.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - +YEAR: 2024 +COPYRIGHT HOLDER: PEcAn Project +ORGANIZATION: PEcAn Project, authors affiliations diff --git a/base/settings/NEWS.md b/base/settings/NEWS.md new file mode 100644 index 00000000000..5cfc2101b4a --- /dev/null +++ b/base/settings/NEWS.md @@ -0,0 +1,15 @@ +# PEcAn.settings 1.8.0.9000 + +* PEcAn.settings is now distributed under the BSD 3-clause license instead of the NCSA Open Source license. + + +# PEcAn.settings 1.8.0 + +* Bug fixes for ensemble runs. + + +# PEcAn.settings 1.7.1 + +* All changes in 1.7.1 and earlier were recorded in a single file for all of the + PEcAn packages; please see + https://github.com/PecanProject/pecan/blob/v1.7.1/CHANGELOG.md for details. diff --git a/base/settings/R/MultiSettings.R b/base/settings/R/MultiSettings.R index cfc1ef3e712..b61d6af9bea 100644 --- a/base/settings/R/MultiSettings.R +++ b/base/settings/R/MultiSettings.R @@ -138,6 +138,11 @@ names.MultiSettings <- function(x) { stop("Can't name MultiSettings this way. Use settingNames() instead.") } +#' function that can retrieve or update the names of multi-settings. +#' +#' @param multiSettings object for which to retrieve or set the names. +#' @param settingNames names to be set for the multi-settings object. +#' #' @export settingNames <- function(multiSettings, settingNames) { if (missing(settingNames)) { @@ -159,6 +164,10 @@ print.MultiSettings <- function(x, printAll = FALSE, ...) { } } +#' generic function for printing contents of objects. +#' +#' @param x object to be printed. +#' #' @export printAll <- function(x) { UseMethod("printAll", x) @@ -196,7 +205,10 @@ listToXml.MultiSettings <- function(item, tag, collapse = TRUE) { NextMethod() } # listToXml.MultiSettings - +#' generic function for expanding multi-settings. +#' +#' @param x object to be expanded. +#' #' @export expandMultiSettings <- function(x) { UseMethod("expandMultiSettings") diff --git a/base/settings/R/SafeList.R b/base/settings/R/SafeList.R index 6eeebe18264..29038fdd0b0 100644 --- a/base/settings/R/SafeList.R +++ b/base/settings/R/SafeList.R @@ -1,12 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - #' Create a SafeList object #' #' `SafeList` is a wrapper class for the normal R list. It should behave diff --git a/base/settings/R/Settings.R b/base/settings/R/Settings.R index a3ec1cfcd06..f845ab6b9ab 100644 --- a/base/settings/R/Settings.R +++ b/base/settings/R/Settings.R @@ -1,11 +1,3 @@ -##----------------------------------------------------------------------------- -## Copyright (c) 2012 University of Illinois, NCSA. -## All rights reserved. 
This program and the accompanying materials -## are made available under the terms of the -## University of Illinois/NCSA Open Source License -## which accompanies this distribution, and is available at -## http://opensource.ncsa.illinois.edu/license.html -##----------------------------------------------------------------------------- #' Create a PEcAn Settings object #' #' @title Create a PEcAn Settings object diff --git a/base/settings/R/addSecrets.R b/base/settings/R/addSecrets.R index 032f33af046..65f7bc6f415 100644 --- a/base/settings/R/addSecrets.R +++ b/base/settings/R/addSecrets.R @@ -1,20 +1,13 @@ -##----------------------------------------------------------------------------- -## Copyright (c) 2012 University of Illinois, NCSA. -## All rights reserved. This program and the accompanying materials -## are made available under the terms of the -## University of Illinois/NCSA Open Source License -## which accompanies this distribution, and is available at -## http://opensource.ncsa.illinois.edu/license.html -##----------------------------------------------------------------------------- #' Add secret information from ~/.pecan.xml #' #' Copies certains sections from ~/.pecan.xml to the settings. This allows #' a user to have their own unique parameters also when sharing the #' pecan.xml file we don't expose these secrets. -#' Currently this will copy the database and browndog sections +#' Currently this will copy the database sections #' #' @title Add Users secrets #' @param settings settings file +#' @param force Logical: add secrets even if they have been added previously? #' @return will return the updated settings values #' @author Rob Kooper #' @export addSecrets @@ -51,19 +44,6 @@ addSecrets <- function(settings, force = FALSE) { } } - # only copy these sections if tag exists - for (key in c("browndog")) { - if (! key %in% names(settings)) next - - for (section in names(pecan[[key]])) { - if (section %in% names(settings[section])) { - PEcAn.logger::logger.info("Already have a section for", section) - } else { - PEcAn.logger::logger.info("Imported section for", section) - settings[[key]][section] <- pecan[[key]][section] - } - } - } return(invisible(settings)) } diff --git a/base/settings/R/check.all.settings.R b/base/settings/R/check.all.settings.R index 35999a50302..bc19f96d2e9 100644 --- a/base/settings/R/check.all.settings.R +++ b/base/settings/R/check.all.settings.R @@ -1,12 +1,3 @@ -##----------------------------------------------------------------------------- -## Copyright (c) 2012 University of Illinois, NCSA. -## All rights reserved. 
This program and the accompanying materials -## are made available under the terms of the -## University of Illinois/NCSA Open Source License -## which accompanies this distribution, and is available at -## http://opensource.ncsa.illinois.edu/license.html -##----------------------------------------------------------------------------- - #' check to see if inputs are specified - this should be part of the model code #' @title Check Inputs #' @param settings settings file @@ -260,13 +251,11 @@ check.bety.version <- function(dbcon) { } # check if database is newer - last_migration_date <- lubridate::ymd_hms(utils::tail(versions, n = 1)) - pecan_release_date <- lubridate::ymd( - utils::packageDescription("PEcAn.DB")$Date) - if (last_migration_date > pecan_release_date) { + unknown_migrations <- setdiff(versions, .known_bety_migrations) + if (any(unknown_migrations)) { PEcAn.logger::logger.warn( - "Last database migration", utils::tail(versions, n = 1), - "is more recent than this", pecan_release_date, "release of PEcAn.", + "Found database migration(s) not known by this release of PEcAn.settings:", + unknown_migrations, "This could result in PEcAn not working as expected.") } } @@ -279,6 +268,7 @@ check.bety.version <- function(dbcon) { #' - pfts with at least one pft defined #' @title Check Settings #' @param settings settings file +#' @param force Logical: Rerun check even if these settings have been checked previously? #' @return will return the updated settings values with defaults set. #' @author Rob Kooper, David LeBauer #' @export check.settings @@ -614,6 +604,7 @@ check.settings <- function(settings, force = FALSE) { #' @title Check Run Settings #' @param settings settings file +#' @param dbcon database connection. #' @export check.run.settings check.run.settings <- function(settings, dbcon = NULL) { scipen <- getOption("scipen") @@ -727,6 +718,7 @@ check.run.settings <- function(settings, dbcon = NULL) { if (is.null(settings$run$site$id)) { settings$run$site$id <- -1 } else if (settings$run$site$id >= 0) { + site <- NULL if (!is.null(dbcon)) { site <- PEcAn.DB::db.query( paste( @@ -804,6 +796,7 @@ check.run.settings <- function(settings, dbcon = NULL) { #' @title Check Model Settings #' @param settings settings file +#' @param dbcon database connection. 
#' @export check.model.settings check.model.settings <- function(settings, dbcon = NULL) { # check modelid with values @@ -934,9 +927,10 @@ check.model.settings <- function(settings, dbcon = NULL) { return(settings) } -#' @title Check Workflow Settings +#' Check Workflow Settings #' @param settings settings file -#' @export check.workflow.settings +#' @param dbcon database connection +#' @export check.workflow.settings <- function(settings, dbcon = NULL) { # check for workflow defaults fixoutdir <- FALSE @@ -1076,6 +1070,7 @@ check.database.settings <- function(settings) { #' @param settings settings file #' @export check.ensemble.settings check.ensemble.settings <- function(settings) { + # check ensemble if (!is.null(settings$ensemble)) { if (is.null(settings$ensemble$variable)) { @@ -1096,6 +1091,10 @@ check.ensemble.settings <- function(settings) { if (is.null(settings$ensemble$start.year)) { if (!is.null(settings$run$start.date)) { + startdate <- lubridate::parse_date_time( + settings$run$start.date, + "ymd_HMS", + truncated = 3) settings$ensemble$start.year <- lubridate::year( settings$run$start.date) PEcAn.logger::logger.info( @@ -1115,6 +1114,10 @@ check.ensemble.settings <- function(settings) { if (is.null(settings$ensemble$end.year)) { if (!is.null(settings$run$end.date)) { + enddate <- lubridate::parse_date_time( + settings$run$end.date, + "ymd_HMS", + truncated = 3) settings$ensemble$end.year <- lubridate::year(settings$run$end.date) PEcAn.logger::logger.info( "No end date passed to ensemble - using the run date (", @@ -1148,21 +1151,22 @@ check.ensemble.settings <- function(settings) { PEcAn.logger::logger.severe( "Start year of ensemble should come before the end year of the ensemble") } - } - # Old version of pecan xml files which they don't have a sampling space - # or it's just sampling space and nothing inside it. - if (is.null(settings$ensemble$samplingspace) - || !is.list(settings$ensemble$samplingspace)) { - PEcAn.logger::logger.info( - "We are updating the ensemble tag inside the xml file.") - # I try to put ensemble method in older versions into the parameter space - - # If I fail (when no method is defined) I just set it as uniform - settings$ensemble$samplingspace$parameters$method <- settings$ensemble$method - if (is.null(settings$ensemble$samplingspace$parameters$method)) { - settings$ensemble$samplingspace$parameters$method <- "uniform" + + # Old version of pecan xml files which they don't have a sampling space + # or it's just sampling space and nothing inside it. 
+ if (is.null(settings$ensemble$samplingspace) + || !is.list(settings$ensemble$samplingspace)) { + PEcAn.logger::logger.info( + "We are updating the ensemble tag inside the xml file.") + # I try to put ensemble method in older versions into the parameter space - + # If I fail (when no method is defined) I just set it as uniform + settings$ensemble$samplingspace$parameters$method <- settings$ensemble$method + if (is.null(settings$ensemble$samplingspace$parameters$method)) { + settings$ensemble$samplingspace$parameters$method <- "uniform" + } + #putting something simple in the met + settings$ensemble$samplingspace$met$method <- "sampling" } - #putting something simple in the met - settings$ensemble$samplingspace$met$method <- "sampling" } return(settings) } diff --git a/base/settings/R/clean.settings.R b/base/settings/R/clean.settings.R index 8cb101ab159..68c7db21487 100644 --- a/base/settings/R/clean.settings.R +++ b/base/settings/R/clean.settings.R @@ -1,12 +1,3 @@ -##----------------------------------------------------------------------------- -## Copyright (c) 2012 University of Illinois, NCSA. -## All rights reserved. This program and the accompanying materials -## are made available under the terms of the -## University of Illinois/NCSA Open Source License -## which accompanies this distribution, and is available at -## http://opensource.ncsa.illinois.edu/license.html -##----------------------------------------------------------------------------- - #' Cleans PEcAn settings file #' #' This will try and clean the settings file so it is ready for @@ -15,6 +6,7 @@ #' @param inputfile the PEcAn settings file to be used. #' @param outputfile the name of file to which the settings will be #' written inside the outputdir. +#' @param write Indicates whether to write the modified settings to a file. #' @return list of all settings as saved to the XML file(s) #' @export clean.settings #' @author Rob Kooper diff --git a/base/settings/R/fix.deprecated.settings.R b/base/settings/R/fix.deprecated.settings.R index bd1da340567..4fddbcdb0fb 100644 --- a/base/settings/R/fix.deprecated.settings.R +++ b/base/settings/R/fix.deprecated.settings.R @@ -1,15 +1,8 @@ -##----------------------------------------------------------------------------- -## Copyright (c) 2012 University of Illinois, NCSA. -## All rights reserved. This program and the accompanying materials -## are made available under the terms of the -## University of Illinois/NCSA Open Source License -## which accompanies this distribution, and is available at -## http://opensource.ncsa.illinois.edu/license.html -##----------------------------------------------------------------------------- #' Checks for and attempts to fix deprecated settings structure #' #' @title Fix Deprecated Settings #' @param settings settings list +#' @param force Logical: re-run fixing of deprecated settings even if it has been done previously? #' @return updated settings list #' @author Ryan Kelly #' @export fix.deprecated.settings diff --git a/base/settings/R/known_bety_migrations.R b/base/settings/R/known_bety_migrations.R new file mode 100644 index 00000000000..21a670ae34c --- /dev/null +++ b/base/settings/R/known_bety_migrations.R @@ -0,0 +1,69 @@ +# List of the entries in bety table `schema_migrations` that are known to this +# version of PEcAn.settings. If a live query returns entries not in this table, +# we may wish to warn about a potential mismatch between package and database. 
+# +# TODO: Would it make sense to move this, and the checks it supports, to PEcAn.DB? +.known_bety_migrations <- c( + "1", + "20130104205059", + "20130104211901", + "20130104211946", + "20130109205535", + "20130222222929", + "20130425152503", + "20130624001504", + "20130629205658", + "20130707190720", + "20130717162614", + "20130813212131", + "20130829162053", + "20150904184512", + "20130830184559", + "20140418005637", + "20140422155957", + "20140423220457", + "20140506210037", + "20140515205254", + "20140521180349", + "20140604192901", + "20140617163304", + "20140610210928", + "20140621060009", + "20140623004229", + "20140624185610", + "20140708232320", + "20140729045640", + "20151007174432", + "20151011190026", + "20140904220035", + "20140904221818", + "20140909212759", + "20140915153555", + "20141009160121", + "20141208165401", + "20141211220550", + "20150202215147", + "20150202220519", + "20150213162341", + "20150313165132", + "20150521211114", + "20150624220952", + "20150624222656", + "20150625184958", + "20151014182146", + "20160303221049", + "20160412030352", + "20160523165531", + "20160617133217", + "20160711231257", + "20160720182233", + "20160930213737", + "20161003180105", + "20161005181021", + "20161129192658", + "20170118205944", + "20170415183619", + "20170712171513", + "20180510184222", + "20181129000515", + "20200329233137") diff --git a/base/settings/R/listToXml.R b/base/settings/R/listToXml.R index c9b3ed5e37c..e354a33e6c4 100644 --- a/base/settings/R/listToXml.R +++ b/base/settings/R/listToXml.R @@ -1,3 +1,7 @@ +#' A generic function to convert list to XML +#' +#' @param x list to be converted +#' @param ... arguments passed to methods #' @export listToXml <- function(x, ...) { UseMethod("listToXml") @@ -8,43 +12,53 @@ listToXml <- function(x, ...) { #' #' Can convert list or other object to an xml object using xmlNode #' @title List to XML -#' @param item object to be converted. +#' @param x object to be converted. #' Despite the function name, need not actually be a list -#' @param tag xml tag +#' @param ... further arguments. +#' Used to set the element name of the created XML object, +#' which is taken from an argument named `tag` if present, +#' or otherwise from the first element of `...` #' @return xmlNode #' @export #' @author David LeBauer, Carl Davidson, Rob Kooper -listToXml.default <- function(item, tag) { - +listToXml.default <- function(x, ...) { + args <- list(...) 
+ if (length(args) == 0) { + PEcAn.logger::logger.error("no tag provided") + } else if ("tag" %in% names(args)) { + tag <- args$tag + } else { + tag <- args[[1]] + } # just a textnode, or empty node with attributes - if (typeof(item) != "list") { - if (length(item) > 1) { + if (typeof(x) != "list") { + if (length(x) > 1) { xml <- XML::xmlNode(tag) - for (name in names(item)) { - XML::xmlAttrs(xml)[[name]] <- item[[name]] + for (name in names(x)) { + XML::xmlAttrs(xml)[[name]] <- x[[name]] } return(xml) } else { - return(XML::xmlNode(tag, item)) + return(XML::xmlNode(tag, x)) } } - + # create the node - if (identical(names(item), c("text", ".attrs"))) { + if (identical(names(x), c("text", ".attrs"))) { # special case a node with text and attributes - xml <- XML::xmlNode(tag, item[["text"]]) + xml <- XML::xmlNode(tag, x[["text"]]) } else { # node with child nodes xml <- XML::xmlNode(tag) - for (i in seq_along(item)) { - if (is.null(names(item)) || names(item)[i] != ".attrs") { - xml <- XML::append.xmlNode(xml, listToXml(item[[i]], names(item)[i])) + for (i in seq_along(x)) { + if (is.null(names(x)) || names(x)[i] != ".attrs") { + xml <- XML::append.xmlNode(xml, listToXml(x[[i]], names(x)[i])) } } } - + # add attributes to node - attrs <- item[[".attrs"]] + attrs <- x[[".attrs"]] for (name in names(attrs)) { XML::xmlAttrs(xml)[[name]] <- attrs[[name]] } diff --git a/base/settings/R/papply.R b/base/settings/R/papply.R index 155e736ee53..b76f0753b50 100644 --- a/base/settings/R/papply.R +++ b/base/settings/R/papply.R @@ -50,7 +50,10 @@ papply <- function(settings, fn, ..., stop.on.error = FALSE) { "papply executing ", deparse(substitute(fn)), "on element ", i, " of ", length(settings), ".") - result.i <- try(fn(settings[[i]], ...), silent = TRUE) + tmp = settings[[i]] + if(all(grepl("settings",names(tmp$run)))) tmp$run = tmp$run[[i]] + + result.i <- try(fn(tmp, ...), silent = TRUE) if (!inherits(result.i, "try-error")) { ind <- length(result) + 1 diff --git a/base/settings/R/pft_site_linker.R b/base/settings/R/pft_site_linker.R index c3ca5cec55c..cda8afdbe84 100644 --- a/base/settings/R/pft_site_linker.R +++ b/base/settings/R/pft_site_linker.R @@ -56,16 +56,18 @@ site.pft.linkage <- function(settings, site.pft.links) { "Since your site xml tag does NOT have a site id,", "we can not assign a PFT to it. The site of this site is", (site.setting[["run"]])$site$name) - } - # see if we can find this site id in the LUT - if (site.id %in% site.pft.links$site) { - site.pft <- site.pft.links$pft[which(site.pft.links$site %in% site.id)] - } - # if there was a pft associated with that - if (!is.null(site.pft)) { - site.setting[["run"]]$site$site.pft <- stats::setNames( - as.list(site.pft), - rep("pft.name", length(site.pft))) + } else { + # see if we can find this site id in the LUT + if (site.id %in% site.pft.links$site) { + site.pft <- site.pft.links$pft[which(site.pft.links$site %in% site.id)] + } + # if there was a pft associated with that + if (!is.null(site.pft)) { + site.setting[["run"]]$site$site.pft <- stats::setNames( + as.list(site.pft), + rep("pft.name", length(site.pft)) + ) + } } return(site.setting) }) diff --git a/base/settings/R/read.settings.R b/base/settings/R/read.settings.R index da590c44d06..4e3b86c582e 100644 --- a/base/settings/R/read.settings.R +++ b/base/settings/R/read.settings.R @@ -1,12 +1,3 @@ -##----------------------------------------------------------------------------- -## Copyright (c) 2012 University of Illinois, NCSA. -## All rights reserved. 
This program and the accompanying materials -## are made available under the terms of the -## University of Illinois/NCSA Open Source License -## which accompanies this distribution, and is available at -## http://opensource.ncsa.illinois.edu/license.html -##----------------------------------------------------------------------------- - #' Loads PEcAn settings file #' #' This will try and find the PEcAn settings file in the following order: diff --git a/base/settings/R/update.settings.R b/base/settings/R/update.settings.R index 0d6c4ab4ee9..03b8aea011b 100644 --- a/base/settings/R/update.settings.R +++ b/base/settings/R/update.settings.R @@ -1,17 +1,10 @@ -##----------------------------------------------------------------------------- -## Copyright (c) 2012 University of Illinois, NCSA. -## All rights reserved. This program and the accompanying materials -## are made available under the terms of the -## University of Illinois/NCSA Open Source License -## which accompanies this distribution, and is available at -## http://opensource.ncsa.illinois.edu/license.html -##----------------------------------------------------------------------------- #' Updates a pecan.xml file to match new layout. This will take care of the #' conversion to the latest pecan.xml file. #' #' @title Update Settings #' @name update.settings #' @param settings settings file +#' @param force Logical: update even if settings have previously been updated?. #' @return will return the updated settings values #' @export update.settings #' @author Rob Kooper @@ -43,7 +36,7 @@ update.settings <- function(settings, force = FALSE) { PEcAn.logger::logger.info( "Database tag has changed, please use to store", "information about accessing the BETY database. See also", - "https://github.com/PecanProject/pecan/wiki/PEcAn-Configuration#database-access.") + "https://pecanproject.github.io/pecan-documentation/develop/pecanXML.html.") bety <- list() for (name in names(settings$database)) { @@ -57,7 +50,7 @@ update.settings <- function(settings, force = FALSE) { PEcAn.logger::logger.warn( " is now part of the database settings. For more", "information about the database settings see", - "https://github.com/PecanProject/pecan/wiki/PEcAn-Configuration#database-access.") + "https://pecanproject.github.io/pecan-documentation/develop/pecanXML.html.") if (is.null(settings$database$bety$write)) { settings$database$bety$write <- settings$bety$write settings$bety$write <- NULL @@ -81,7 +74,7 @@ update.settings <- function(settings, force = FALSE) { PEcAn.logger::logger.info( "Model tag has changed, please use to specify", "type of model. See also", - "https://github.com/PecanProject/pecan/wiki/PEcAn-Configuration#model_setup.") + "https://pecanproject.github.io/pecan-documentation/develop/pecanXML.html.") settings$model$type <- settings$model$model_type settings$model$model_type <- NULL } @@ -99,7 +92,7 @@ update.settings <- function(settings, force = FALSE) { PEcAn.logger::logger.info( "Model tag has changed, please use to specify", "type of model. See also", - "https://github.com/PecanProject/pecan/wiki/PEcAn-Configuration#model_setup.") + "https://pecanproject.github.io/pecan-documentation/develop/pecanXML.html.") settings$model$type <- settings$model$name settings$model$name <- NULL } @@ -121,7 +114,7 @@ update.settings <- function(settings, force = FALSE) { PEcAn.logger::logger.info( "Model tag has changed, please use to specify", "met file for a run. 
See also", - "https://github.com/PecanProject/pecan/wiki/PEcAn-Configuration#run_setup.") + "https://pecanproject.github.io/pecan-documentation/develop/pecanXML.html.") settings$run$inputs$met$path <- settings$run$site$met settings$run$site$met <- NULL } @@ -180,7 +173,7 @@ update.settings <- function(settings, force = FALSE) { PEcAn.logger::logger.info( "Model tag has changed, please use to specify", "veg file for a run. See also", - "https://github.com/PecanProject/pecan/wiki/PEcAn-Configuration#run_setup.") + "https://pecanproject.github.io/pecan-documentation/develop/pecanXML.html.") settings$run$inputs$veg <- settings$model$veg settings$model$veg <- NULL } @@ -200,7 +193,7 @@ update.settings <- function(settings, force = FALSE) { PEcAn.logger::logger.info( "Model tag has changed, please use to specify", "soil file for a run. See also", - "https://github.com/PecanProject/pecan/wiki/PEcAn-Configuration#run_setup.") + "https://pecanproject.github.io/pecan-documentation/develop/pecanXML.html.") settings$run$inputs$soil <- settings$model$soil settings$model$soil <- NULL } @@ -215,7 +208,7 @@ update.settings <- function(settings, force = FALSE) { PEcAn.logger::logger.info( "Model tag has changed, please use to specify", "pss/css/site file for a run. See also", - "https://github.com/PecanProject/pecan/wiki/PEcAn-Configuration#run_setup.") + "https://pecanproject.github.io/pecan-documentation/develop/pecanXML.html.") settings$run$inputs$pss <- file.path(settings$model$psscss, "foo.pss") settings$run$inputs$css <- file.path(settings$model$psscss, "foo.css") settings$run$inputs$site <- file.path(settings$model$psscss, "foo.site") @@ -232,7 +225,7 @@ update.settings <- function(settings, force = FALSE) { PEcAn.logger::logger.info( "Model tag has changed, please use to specify", "lu/thsums file for a run. See also", - "https://github.com/PecanProject/pecan/wiki/PEcAn-Configuration#run_setup.") + "https://pecanproject.github.io/pecan-documentation/develop/pecanXML.html.") settings$run$inputs$lu <- file.path(settings$model$inputs, "glu") settings$run$inputs$thsums <- settings$model$inputs settings$model$soil <- NULL diff --git a/base/settings/R/version.R b/base/settings/R/version.R new file mode 100644 index 00000000000..0e58d885272 --- /dev/null +++ b/base/settings/R/version.R @@ -0,0 +1,3 @@ +# Set at package install time, used by pecan.all::pecan_version() +# to identify development versions of packages +.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown") diff --git a/base/settings/man/.Rapp.history b/base/settings/man/.Rapp.history deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/base/settings/man/addSecrets.Rd b/base/settings/man/addSecrets.Rd index 697f040afa6..e3366eda13c 100644 --- a/base/settings/man/addSecrets.Rd +++ b/base/settings/man/addSecrets.Rd @@ -8,6 +8,8 @@ addSecrets(settings, force = FALSE) } \arguments{ \item{settings}{settings file} + +\item{force}{Logical: add secrets even if they have been added previously?} } \value{ will return the updated settings values @@ -19,7 +21,7 @@ Add secret information from ~/.pecan.xml Copies certains sections from ~/.pecan.xml to the settings. This allows a user to have their own unique parameters also when sharing the pecan.xml file we don't expose these secrets. 
-Currently this will copy the database and browndog sections +Currently this will copy the database sections } \author{ Rob Kooper diff --git a/base/settings/man/check.model.settings.Rd b/base/settings/man/check.model.settings.Rd index b3d0314c72b..eb2b2047709 100644 --- a/base/settings/man/check.model.settings.Rd +++ b/base/settings/man/check.model.settings.Rd @@ -8,6 +8,8 @@ check.model.settings(settings, dbcon = NULL) } \arguments{ \item{settings}{settings file} + +\item{dbcon}{database connection.} } \description{ Check Model Settings diff --git a/base/settings/man/check.run.settings.Rd b/base/settings/man/check.run.settings.Rd index 8dd2d6fad04..5ea16b01aab 100644 --- a/base/settings/man/check.run.settings.Rd +++ b/base/settings/man/check.run.settings.Rd @@ -8,6 +8,8 @@ check.run.settings(settings, dbcon = NULL) } \arguments{ \item{settings}{settings file} + +\item{dbcon}{database connection.} } \description{ Check Run Settings diff --git a/base/settings/man/check.settings.Rd b/base/settings/man/check.settings.Rd index dd2afb23086..d47a438a4a0 100644 --- a/base/settings/man/check.settings.Rd +++ b/base/settings/man/check.settings.Rd @@ -8,6 +8,8 @@ check.settings(settings, force = FALSE) } \arguments{ \item{settings}{settings file} + +\item{force}{Logical: Rerun check even if these settings have been checked previously?} } \value{ will return the updated settings values with defaults set. diff --git a/base/settings/man/check.workflow.settings.Rd b/base/settings/man/check.workflow.settings.Rd index 00416872adf..edf68041661 100644 --- a/base/settings/man/check.workflow.settings.Rd +++ b/base/settings/man/check.workflow.settings.Rd @@ -8,6 +8,8 @@ check.workflow.settings(settings, dbcon = NULL) } \arguments{ \item{settings}{settings file} + +\item{dbcon}{database connection} } \description{ Check Workflow Settings diff --git a/base/settings/man/clean.settings.Rd b/base/settings/man/clean.settings.Rd index 1cb552ce7f5..74d1a2a150e 100644 --- a/base/settings/man/clean.settings.Rd +++ b/base/settings/man/clean.settings.Rd @@ -11,6 +11,8 @@ clean.settings(inputfile = "pecan.xml", outputfile = "pecan.xml", write = TRUE) \item{outputfile}{the name of file to which the settings will be written inside the outputdir.} + +\item{write}{Indicates whether to write the modified settings to a file.} } \value{ list of all settings as saved to the XML file(s) diff --git a/base/settings/man/expandMultiSettings.Rd b/base/settings/man/expandMultiSettings.Rd new file mode 100644 index 00000000000..1ae291e1ae4 --- /dev/null +++ b/base/settings/man/expandMultiSettings.Rd @@ -0,0 +1,14 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/MultiSettings.R +\name{expandMultiSettings} +\alias{expandMultiSettings} +\title{generic function for expanding multi-settings.} +\usage{ +expandMultiSettings(x) +} +\arguments{ +\item{x}{object to be expanded.} +} +\description{ +generic function for expanding multi-settings. 
+} diff --git a/base/settings/man/fix.deprecated.settings.Rd b/base/settings/man/fix.deprecated.settings.Rd index 81af03064ce..44cb7d05713 100644 --- a/base/settings/man/fix.deprecated.settings.Rd +++ b/base/settings/man/fix.deprecated.settings.Rd @@ -8,6 +8,8 @@ fix.deprecated.settings(settings, force = FALSE) } \arguments{ \item{settings}{settings list} + +\item{force}{Logical: re-run fixing of deprecated settings even if it has been done previously?} } \value{ updated settings list diff --git a/base/settings/man/listToXml.Rd b/base/settings/man/listToXml.Rd new file mode 100644 index 00000000000..076082e7274 --- /dev/null +++ b/base/settings/man/listToXml.Rd @@ -0,0 +1,16 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/listToXml.R +\name{listToXml} +\alias{listToXml} +\title{A generic function to convert list to XML} +\usage{ +listToXml(x, ...) +} +\arguments{ +\item{x}{list to be converted} + +\item{...}{arguments passed to methods} +} +\description{ +A generic function to convert list to XML +} diff --git a/base/settings/man/listToXml.default.Rd b/base/settings/man/listToXml.default.Rd index 27228be16f7..87cb9a568e9 100644 --- a/base/settings/man/listToXml.default.Rd +++ b/base/settings/man/listToXml.default.Rd @@ -4,13 +4,16 @@ \alias{listToXml.default} \title{List to XML} \usage{ -\method{listToXml}{default}(item, tag) +\method{listToXml}{default}(x, ...) } \arguments{ -\item{item}{object to be converted. +\item{x}{object to be converted. Despite the function name, need not actually be a list} -\item{tag}{xml tag} +\item{...}{further arguments. +Used to set the element name of the created XML object, +which is taken from an argument named \code{tag} if present, +or otherwise from the first element of \code{...}} } \value{ xmlNode diff --git a/base/settings/man/printAll.Rd b/base/settings/man/printAll.Rd new file mode 100644 index 00000000000..e99ebda3420 --- /dev/null +++ b/base/settings/man/printAll.Rd @@ -0,0 +1,14 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/MultiSettings.R +\name{printAll} +\alias{printAll} +\title{generic function for printing contents of objects.} +\usage{ +printAll(x) +} +\arguments{ +\item{x}{object to be printed.} +} +\description{ +generic function for printing contents of objects. +} diff --git a/base/settings/man/settingNames.Rd b/base/settings/man/settingNames.Rd new file mode 100644 index 00000000000..7663933d712 --- /dev/null +++ b/base/settings/man/settingNames.Rd @@ -0,0 +1,16 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/MultiSettings.R +\name{settingNames} +\alias{settingNames} +\title{function that can retrieve or update the names of multi-settings.} +\usage{ +settingNames(multiSettings, settingNames) +} +\arguments{ +\item{multiSettings}{object for which to retrieve or set the names.} + +\item{settingNames}{names to be set for the multi-settings object.} +} +\description{ +function that can retrieve or update the names of multi-settings. 
+} diff --git a/base/settings/man/update.settings.Rd b/base/settings/man/update.settings.Rd index 5768825af83..cd473e2389d 100644 --- a/base/settings/man/update.settings.Rd +++ b/base/settings/man/update.settings.Rd @@ -8,6 +8,8 @@ } \arguments{ \item{settings}{settings file} + +\item{force}{Logical: update even if settings have previously been updated?.} } \value{ will return the updated settings values diff --git a/base/settings/tests/Rcheck_reference.log b/base/settings/tests/Rcheck_reference.log index e981f489931..19c37863095 100644 --- a/base/settings/tests/Rcheck_reference.log +++ b/base/settings/tests/Rcheck_reference.log @@ -140,36 +140,7 @@ All user-level objects in a package should have documentation entries. See chapter ‘Writing R documentation files’ in the ‘Writing R Extensions’ manual. * checking for code/documentation mismatches ... OK -* checking Rd \usage sections ... WARNING -Undocumented arguments in documentation object 'addSecrets' - ‘force’ - -Undocumented arguments in documentation object 'check.model.settings' - ‘dbcon’ - -Undocumented arguments in documentation object 'check.run.settings' - ‘dbcon’ - -Undocumented arguments in documentation object 'check.settings' - ‘force’ - -Undocumented arguments in documentation object 'check.workflow.settings' - ‘dbcon’ - -Undocumented arguments in documentation object 'clean.settings' - ‘write’ - -Undocumented arguments in documentation object 'fix.deprecated.settings' - ‘force’ - -Undocumented arguments in documentation object 'update.settings' - ‘force’ - -Functions with \usage entries need to have the appropriate \alias -entries, and all their arguments documented. -The \usage entries must correspond to syntactically valid R code. -See chapter ‘Writing R documentation files’ in the ‘Writing R -Extensions’ manual. +* checking Rd \usage sections ... OK * checking Rd contents ... OK * checking for unstated dependencies in examples ... OK * checking examples ... OK @@ -179,4 +150,4 @@ Extensions’ manual. * checking for detritus in the temp directory ... OK * DONE -Status: 5 WARNINGs, 3 NOTEs +Status: 3 WARNINGs, 2 NOTEs diff --git a/base/settings/tests/testthat.R b/base/settings/tests/testthat.R index 9842744f2a8..c943b3f7592 100644 --- a/base/settings/tests/testthat.R +++ b/base/settings/tests/testthat.R @@ -1,11 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- library(testthat) library(PEcAn.settings) diff --git a/base/settings/tests/testthat/testinput.pecan2.bu.edu.xml b/base/settings/tests/testthat/data/testinput.pecan2.bu.edu.xml similarity index 100% rename from base/settings/tests/testthat/testinput.pecan2.bu.edu.xml rename to base/settings/tests/testthat/data/testinput.pecan2.bu.edu.xml diff --git a/base/settings/tests/testthat/testinput.xml b/base/settings/tests/testthat/data/testinput.xml similarity index 100% rename from base/settings/tests/testthat/testinput.xml rename to base/settings/tests/testthat/data/testinput.xml diff --git a/base/settings/tests/testthat/data/testinputcleanup.xml b/base/settings/tests/testthat/data/testinputcleanup.xml new file mode 100644 index 00000000000..01d9d582199 --- /dev/null +++ b/base/settings/tests/testthat/data/testinputcleanup.xml @@ -0,0 +1,29 @@ + + + testdir + testdir + testdir + + /data/dbfiles + + + + salix + testdir + 1 + + + + localhost + /fs/data3/guestuser/pecan/testworkflow/run + /fs/data3/guestuser/pecan/testworkflow/out + + + + localhost + + + + 99000000006 + + \ No newline at end of file diff --git a/base/settings/tests/testthat/testsettings-comment.xml b/base/settings/tests/testthat/data/testsettings-comment.xml similarity index 100% rename from base/settings/tests/testthat/testsettings-comment.xml rename to base/settings/tests/testthat/data/testsettings-comment.xml diff --git a/base/settings/tests/testthat/testsettings.xml b/base/settings/tests/testthat/data/testsettings.xml similarity index 100% rename from base/settings/tests/testthat/testsettings.xml rename to base/settings/tests/testthat/data/testsettings.xml diff --git a/base/settings/tests/testthat/helper-get.test.settings.R b/base/settings/tests/testthat/helper-get.test.settings.R index 6da1380a14e..de1cc43a814 100644 --- a/base/settings/tests/testthat/helper-get.test.settings.R +++ b/base/settings/tests/testthat/helper-get.test.settings.R @@ -2,9 +2,9 @@ settings <- NULL try({ if (PEcAn.remote::fqdn() == "pecan2.bu.edu") { - settings <- read.settings("testinput.pecan2.bu.edu.xml") + settings <- read.settings("data/testinput.pecan2.bu.edu.xml") } else { - settings <- read.settings("testinput.xml") + settings <- read.settings("data/testinput.xml") } }, silent = TRUE) diff --git a/base/settings/tests/testthat/pecan.xml b/base/settings/tests/testthat/pecan.xml deleted file mode 100644 index d99187cc8a2..00000000000 --- a/base/settings/tests/testthat/pecan.xml +++ /dev/null @@ -1,12 +0,0 @@ - - /tmp/test/ - - - bety - bety - bety - localhost - bety - - - diff --git a/base/settings/tests/testthat/test.MultiSettings.class.R b/base/settings/tests/testthat/test.MultiSettings.class.R index 0e07a6a5d4f..28d2114b2a9 100644 --- a/base/settings/tests/testthat/test.MultiSettings.class.R +++ b/base/settings/tests/testthat/test.MultiSettings.class.R @@ -1,11 +1,3 @@ -#---------------------------------------------------------------------------- -## Copyright (c) 2012 University of Illinois, NCSA. -## All rights reserved. 
This program and the accompanying materials -## are made available under the terms of the -## University of Illinois/NCSA Open Source License -## which accompanies this distribution, and is available at -## http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------ context("test MultiSettings class") diff --git a/base/settings/tests/testthat/test.Safelist.class.R b/base/settings/tests/testthat/test.Safelist.class.R index 6bb7a319054..3ec7e60e7dc 100644 --- a/base/settings/tests/testthat/test.Safelist.class.R +++ b/base/settings/tests/testthat/test.Safelist.class.R @@ -1,11 +1,3 @@ -#---------------------------------------------------------------------------- -## Copyright (c) 2012 University of Illinois, NCSA. -## All rights reserved. This program and the accompanying materials -## are made available under the terms of the -## University of Illinois/NCSA Open Source License -## which accompanies this distribution, and is available at -## http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------ context("test SafeList class") test_that("SafeList constructors work as expected", { diff --git a/base/settings/tests/testthat/test.Settings.class.R b/base/settings/tests/testthat/test.Settings.class.R index e210d22184d..71d2f40f6ca 100644 --- a/base/settings/tests/testthat/test.Settings.class.R +++ b/base/settings/tests/testthat/test.Settings.class.R @@ -1,11 +1,3 @@ -#---------------------------------------------------------------------------- -## Copyright (c) 2012 University of Illinois, NCSA. -## All rights reserved. This program and the accompanying materials -## are made available under the terms of the -## University of Illinois/NCSA Open Source License -## which accompanies this distribution, and is available at -## http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------ context("test Settings class") test_that("Settings constructors work as expected", { diff --git a/base/settings/tests/testthat/test.addSecrets.R b/base/settings/tests/testthat/test.addSecrets.R new file mode 100644 index 00000000000..f683f135877 --- /dev/null +++ b/base/settings/tests/testthat/test.addSecrets.R @@ -0,0 +1,59 @@ +test_that("`addSecrets` returns settings without updating them when `~/.pecan.xml` does not exist", { + settings <- list() + mockery::stub(addSecrets, 'file.exists', FALSE) + expect_equal(addSecrets(settings), settings) +}) + +test_that("`addSecrets` returns settings without updating them when force is FALSE and secrets have already been added", { + settings <- list( + settings.info = list( + secrets.added = TRUE + ) + ) + mockery::stub(addSecrets, 'file.exists', TRUE) + expect_equal(addSecrets(settings, force = FALSE), settings) +}) + +test_that("`addSecrets` adds secret settings when force is TRUE and secrets have already been added", { + settings <- list( + settings.info = list( + secrets.added = TRUE + ) + ) + + mocked_xmlToList_result <- list( + database = list( + section = list( + name = "pecan", + password = "pecan" + ) + ) + ) + mockery::stub(addSecrets, 'file.exists', TRUE) + mockery::stub(addSecrets, 'xmlToList', mocked_xmlToList_result) + updated_settings <- addSecrets(settings, force = TRUE) + expect_equal(updated_settings$database$section$name, "pecan") + expect_equal(updated_settings$database$section$password, "pecan") +}) + +test_that("`addSecrets` adds secret settings 
when force is FALSE and secrets have not been added", { + settings <- list( + settings.info = list( + secrets.added = FALSE + ) + ) + + mocked_xmlToList_result <- list( + database = list( + section = list( + name = "pecan", + password = "pecan" + ) + ) + ) + mockery::stub(addSecrets, 'file.exists', TRUE) + mockery::stub(addSecrets, 'xmlToList', mocked_xmlToList_result) + updated_settings <- addSecrets(settings, force = FALSE) + expect_equal(updated_settings$database$section$name, "pecan") + expect_equal(updated_settings$database$section$password, "pecan") +}) \ No newline at end of file diff --git a/base/settings/tests/testthat/test.check.all.settings.R b/base/settings/tests/testthat/test.check.all.settings.R new file mode 100644 index 00000000000..9405cafb488 --- /dev/null +++ b/base/settings/tests/testthat/test.check.all.settings.R @@ -0,0 +1,245 @@ +test_that("`check.inputs()` able to set dbfile path for inputs", { + mockery::stub(check.inputs, 'PEcAn.DB::db.open', TRUE) + mockery::stub(check.inputs, 'PEcAn.DB::db.close', TRUE) + mockery::stub(check.inputs, 'PEcAn.DB::dbfile.file', "test/path/to/file") + + mocked_query_res = mockery::mock( + data.frame( + tag = "test", + format_id = 1, + required = TRUE + ), + data.frame( + format_id = 1 + ) + ) + mockery::stub(check.inputs, 'PEcAn.DB::db.query', mocked_query_res) + + settings <- list( + database = list( + bety = list() + ), + run = list( + inputs = list( + test = list( + id = 1 + ) + ) + ), + model = list( + type = "ed" + ) + ) + + updated_settings <- check.inputs(settings) + expect_equal(updated_settings$run$inputs$test$path, "test/path/to/file") +}) + +test_that("`check.run.settings` throws error if start date greater than end date in run settings", { + settings <- list( + run = list( + start.date = "2010-01-01", + end.date = "2009-01-01" + ) + ) + expect_error( + check.run.settings(settings), + "Start date should come before the end date." 
+ ) +}) + +test_that("`check.run.settings` able to set sensitivity analysis parameters based on ensemble and run params", { + settings <- list( + sensitivity.analysis = list(), + ensemble = list( + variable = "GPP" + ), + run = list( + start.date = "2010-01-01", + end.date = "2015-01-01" + ) + ) + updated_settings <- check.run.settings(settings) + expect_equal(updated_settings$sensitivity.analysis$variable, "GPP") + expect_equal(updated_settings$sensitivity.analysis$start.year, 2010) + expect_equal(updated_settings$sensitivity.analysis$end.year, 2015) +}) + +test_that("`check.run.settings` able to update run site parameters based on site id passed", { + mockery::stub( + check.run.settings, + 'PEcAn.DB::db.query', + data.frame( + sitename = "US-1", + lat = 45, + lon = -90 + ) + ) + settings <- list( + run = list( + site = list( + id = 5 + ) + ) + ) + updated_settings <- check.run.settings(settings, 1) + expect_equal(updated_settings$run$site$id, 5) + expect_equal(updated_settings$run$site$name, "US-1") + expect_equal(updated_settings$run$site$lat, 45.0) + expect_equal(updated_settings$run$site$lon, -90.0) +}) + +test_that("`check.model.settings` able to update model parameters based on passed model id in settings", { + mockery::stub( + check.model.settings, + 'PEcAn.DB::db.query', + data.frame( + id = 7, + revision = 82, + name = "ED2", + type = "ed" + ) + ) + mockery::stub( + check.model.settings, + 'PEcAn.DB::dbfile.file', + "/usr/local/bin/ed2.r82" + ) + settings <- list( + model = list( + id = 7 + ) + ) + + updated_settings <- check.model.settings(settings, 1) + expect_equal(updated_settings$model$id, 7) + expect_equal(updated_settings$model$revision, 82) + expect_equal(updated_settings$model$type, "ed") + expect_equal(updated_settings$model$delete.raw, FALSE) + expect_equal(updated_settings$model$binary, "/usr/local/bin/ed2.r82") +}) + +test_that("`check.model.settings` able to update model parameters based on passed model type in settings", { + mockery::stub( + check.model.settings, + 'PEcAn.DB::db.query', + data.frame( + id = 7, + revision = 82, + name = "ED2", + type = "ed" + ) + ) + mockery::stub( + check.model.settings, + 'PEcAn.DB::dbfile.file', + "/usr/local/bin/ed2.r82" + ) + settings <- list( + model = list( + type = "ed" + ) + ) + + updated_settings <- check.model.settings(settings, 1) + expect_equal(updated_settings$model$id, 7) + expect_equal(updated_settings$model$revision, 82) + expect_equal(updated_settings$model$type, "ed") + expect_equal(updated_settings$model$delete.raw, FALSE) + expect_equal(updated_settings$model$binary, "/usr/local/bin/ed2.r82") +}) + +test_that("`check.workflow.settings` able to set workflow defaults in case they are not specified", { + mockery::stub(check.workflow.settings, 'PEcAn.DB::db.query', list(id = 100)) + mockery::stub(check.workflow.settings, 'file.exists', TRUE) + + settings <- list( + database = list( + bety = list( + write = TRUE + ) + ), + model = list( + id = 1 + ) + ) + updated_settings <- check.workflow.settings(settings, 1) + expect_equal(updated_settings$workflow$id, 100) + expect_equal(updated_settings$outdir, file.path(getwd(), "PEcAn_100")) +}) + +test_that("`check.database` able to set the database object with defaults correctly if nothing specified", { + rdriver <- paste0("R", "PostgreSQL") + withr::with_package(rdriver, { + mockery::stub(check.database, 'PEcAn.DB::db.exists', TRUE) + database <- list() + updated_database <- check.database(database) + expect_equal(updated_database$driver, "PostgreSQL") + 
expect_equal(updated_database$host, "localhost") + expect_equal(updated_database$user, "bety") + expect_equal(updated_database$password, "bety") + expect_equal(updated_database$dbname, "bety") + }) +}) + +test_that("`check.database.settings` able to set bety parameters correctly if they are not specified", { + mockery::stub(check.database.settings, 'PEcAn.DB::db.exists', TRUE) + mockery::stub( + check.database.settings, + 'check.database', + list( + driver = "PostgreSQL", + host = "localhost", + user = "bety", + password = "bety", + dbname = "bety" + ) + ) + mockery::stub(check.database.settings, 'PEcAn.DB::db.open', TRUE) + mockery::stub(check.database.settings, 'PEcAn.DB::db.close', TRUE) + mockery::stub(check.database.settings, 'check.bety.version', TRUE) + + settings <- list( + database = list( + bety = list() + ) + ) + + checked_settings <- check.database.settings(settings) + expect_equal(checked_settings$database$bety$driver, "PostgreSQL") + expect_equal(checked_settings$database$bety$host, "localhost") + expect_equal(checked_settings$database$bety$user, "bety") + expect_equal(checked_settings$database$bety$password, "bety") + expect_equal(checked_settings$database$bety$dbname, "bety") + expect_equal(checked_settings$database$bety$write, TRUE) +}) + +test_that("`check.ensemble.settings` throws an error if no variables are specified to compute the ensemble", { + settings <- list( + ensemble = list() + ) + expect_error( + check.ensemble.settings(settings), + "No variable specified to compute ensemble for." + ) +}) + +test_that("`check.ensemble.settings` able to update ensemble settings when variables, size, start and end year, and sampling space are not specified", { + settings <- list( + ensemble = list(), + sensitivity.analysis = list( + variable = "GPP" + ), + run = list( + start.date = "2000-01-01", + end.date = "2003-12-31" + ) + ) + settings <- check.ensemble.settings(settings) + expect_equal(settings$ensemble$variable, "GPP") + expect_equal(settings$ensemble$size, 1) + expect_equal(settings$ensemble$start.year, 2000) + expect_equal(settings$ensemble$end.year, 2003) + expect_equal(settings$ensemble$samplingspace$parameters$method, "uniform") + expect_equal(settings$ensemble$samplingspace$met$method, "sampling") +}) \ No newline at end of file diff --git a/base/settings/tests/testthat/test.check.bety.version.R b/base/settings/tests/testthat/test.check.bety.version.R new file mode 100644 index 00000000000..e59158aece3 --- /dev/null +++ b/base/settings/tests/testthat/test.check.bety.version.R @@ -0,0 +1,64 @@ + +PEcAn.logger::logger.setQuitOnSevere(FALSE) +on.exit(PEcAn.logger::logger.setQuitOnSevere(TRUE), add = TRUE) + + +test_that("`check.bety.version` gives errors for missing significant versions", { + dbcon <- 1 + mockery::stub( + check.bety.version, + "PEcAn.DB::db.query", + list(version = c("2")) + ) + expect_error( + check.bety.version(dbcon), + "No version 1, how did this database get created?" + ) + + mockery::stub( + check.bety.version, + "PEcAn.DB::db.query", + list(version = c("1")) + ) + expect_error( + check.bety.version(dbcon), + "Missing migration 20140617163304, this associates files with models."
+ ) + + mockery::stub( + check.bety.version, + "PEcAn.DB::db.query", list(version = c("1","20140617163304")) + ) + expect_error( + check.bety.version(dbcon), + "Missing migration 20140708232320, this introduces geometry column in sites" + ) + + mockery::stub( + check.bety.version, + "PEcAn.DB::db.query", + list(version = c("1","20140617163304","20140708232320")) + ) + expect_error( + check.bety.version(dbcon), + "Missing migration 20140729045640, this introduces modeltypes table" + ) + + mockery::stub( + check.bety.version, + "PEcAn.DB::db.query", + list(version = c("1","20140617163304","20140708232320","20140729045640")) + ) + expect_error( + check.bety.version(dbcon), + "Missing migration 20151011190026, this introduces notes and user_id in workflows" + ) + + mockery::stub( + check.bety.version, + "PEcAn.DB::db.query", + list(version = c("1","20140617163304","20140708232320","20140729045640","20151011190026")) + ) + expect_silent(check.bety.version(dbcon)) +}) + diff --git a/base/settings/tests/testthat/test.clean.settings.R b/base/settings/tests/testthat/test.clean.settings.R new file mode 100644 index 00000000000..cf066e8220b --- /dev/null +++ b/base/settings/tests/testthat/test.clean.settings.R @@ -0,0 +1,34 @@ +PEcAn.logger::logger.setQuitOnSevere(FALSE) +on.exit(PEcAn.logger::logger.setQuitOnSevere(TRUE)) + +test_that("`clean.settings` works correctly for invalid and correct inputs", { + + # Error if input file is NULL or does not exist + expect_error( + clean.settings(inputfile = NULL), + "Could not find input file." + ) + expect_error( + clean.settings(inputfile = "nonexistent.xml"), + "Could not find input file." + ) + + # Works if correct input file provided + withr::with_tempfile("tf", { + clean.settings(inputfile = "data/testinputcleanup.xml", outputfile = tf) + test_xml <- readLines(tf) + t <- XML::xmlToList(XML::xmlParse(test_xml)) + + # Check for updated settings after cleanup + expect_equal(t$outdir, "pecan") + expect_equal(t$rundir, NULL) + expect_equal(t$modeloutdir, NULL) + expect_equal(t$host$rundir, NULL) + expect_equal(t$host$outdir, NULL) + expect_equal(t$database$dbfiles, NULL) + expect_equal(t$workflow, NULL) + expect_equal(t$pfts[[1]]$pft$outdir, NULL) + expect_equal(t$pfts[[1]]$pft$posteriorid, NULL) + expect_equal(t$host, list(name = "localhost")) + }) +}) diff --git a/base/settings/tests/testthat/test.createMultisiteMultiSettings.R b/base/settings/tests/testthat/test.createMultisiteMultiSettings.R new file mode 100644 index 00000000000..a74f1b45c0c --- /dev/null +++ b/base/settings/tests/testthat/test.createMultisiteMultiSettings.R @@ -0,0 +1,139 @@ +test_that("`createSitegroupMultiSettings` able to create a MultiSettings object containing (identical) run blocks for different sites in a site group", { + templateSettings <- Settings( + list( + run = list( + start.date = "2015-01-01", + end.date = "2015-12-31", + inputs = list("a", "b") + ) + ) + ) + siteIds <- list( + site_id = list("1000025731", "1000025732") + ) + mockery::stub( + createSitegroupMultiSettings, + 'PEcAn.DB::db.query', + siteIds + ) + + # without specifying nSite + multi_site_settings <- createSitegroupMultiSettings( + templateSettings = templateSettings, + sitegroupId = 10000, + params = NULL + ) + for (i in seq_along(multi_site_settings)) { + expect_equal(multi_site_settings[[i]]$run$site$id, siteIds$site_id[[i]]) + } + expect_equal(length(multi_site_settings), length(siteIds$site_id)) + + + # with nSite specified + multi_site_settings <- createSitegroupMultiSettings( + templateSettings = 
templateSettings, + sitegroupId = 10000, + nSite = 1, + params = NULL + ) + expect_equal(length(multi_site_settings), 1) +}) + +test_that("`createMultiSiteSettings` able to create a MultiSettings object containing (identical) run blocks for different sites", { + templateSettings <- Settings( + list( + run = list( + start.date = "2015-01-01", + end.date = "2015-12-31", + inputs = list("a", "b") + ) + ) + ) + siteIds <- c("1000025731", "1000025732") + multi_site_settings <- createMultiSiteSettings(templateSettings, siteIds) + for (i in seq_along(multi_site_settings)) { + expect_equal(multi_site_settings[[i]]$run$site$id, siteIds[i]) + expect_equal(multi_site_settings[[i]]$run$site$met.start, templateSettings$run$start.date) + expect_equal(multi_site_settings[[i]]$run$site$met.end, templateSettings$run$end.date) + expect_equal(multi_site_settings[[i]]$run$start.date, templateSettings$run$start.date) + expect_equal(multi_site_settings[[i]]$run$end.date, templateSettings$run$end.date) + expect_equal(multi_site_settings[[i]]$run$inputs, templateSettings$run$inputs) + } +}) + +test_that("`getRunSettings` able to build correct run settings for a given site id", { + templateSettings <- list( + run = list( + start.date = "2015-01-01", + end.date = "2015-12-31", + inputs = list("a", "b") + ) + ) + siteId <- "1000025731" + run_settings <- getRunSettings(templateSettings, siteId) + + expect_equal(run_settings$site$id, siteId) + expect_equal(run_settings$site$met.start, templateSettings$run$start.date) + expect_equal(run_settings$site$met.end, templateSettings$run$end.date) + expect_equal(run_settings$start.date, templateSettings$run$start.date) + expect_equal(run_settings$end.date, templateSettings$run$end.date) + expect_equal(run_settings$inputs, templateSettings$run$inputs) +}) + +test_that("`setOutDir` function sets main output directory and nulls out the others", { + settings <- list( + outdir = NULL, + rundir = "old_rundir", + modeloutdir = "old_modeloutdir", + host = list( + rundir = "old_host_rundir", + outdir = "old_host_outdir", + modeloutdir = "old_host_modeloutdir" + ), + pfts = list( + list(outdir = "old_outdir1"), + list(outdir = "old_outdir2") + ) + ) + + outDir <- "new_outdir" + updated_settings <- setOutDir(settings, outDir) + + expect_equal(updated_settings$outdir, outDir) + expect_equal(updated_settings$rundir, NULL) + expect_equal(updated_settings$modeloutdir, NULL) + expect_equal(updated_settings$host$rundir, NULL) + expect_equal(updated_settings$host$outdir, NULL) + expect_equal(updated_settings$host$modeloutdir, NULL) + for (j in seq_along(updated_settings$pfts)) { + expect_equal(updated_settings$pfts[[j]]$outdir, NULL) + } +}) + +test_that("`setDates` function sets start and end dates correctly", { + settings <- list( + run = list( + start.date = NULL, + end.date = NULL + ), + ensemble = list( + start.year = NULL, + end.year = NULL + ), + sensitivity.analysis = list( + start.year = NULL, + end.year = NULL + ) + ) + + startDate <- "2023-01-01" + endDate <- "2023-12-31" + updated_settings <- setDates(settings, startDate, endDate) + + expect_equal(updated_settings$run$start.date, startDate) + expect_equal(updated_settings$run$end.date, endDate) + expect_equal(updated_settings$ensemble$start.year, lubridate::year(startDate)) + expect_equal(updated_settings$ensemble$end.year, lubridate::year(endDate)) + expect_equal(updated_settings$sensitivity.analysis$start.year, lubridate::year(startDate)) + expect_equal(updated_settings$sensitivity.analysis$end.year, lubridate::year(endDate)) +}) 
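All of the new settings tests above follow one pattern: `mockery::stub()` replaces a function that is called inside the function under test (most often `PEcAn.DB::db.query` or `file.exists`), so the suite runs without a live BETY database or any real filesystem state. Below is a minimal, self-contained sketch of that pattern; `lookup_site_name()` is a hypothetical function invented here for illustration and is not part of this changeset.

library(testthat)

# Hypothetical function under test: normally this would query the BETY database.
lookup_site_name <- function(con, site_id) {
  res <- PEcAn.DB::db.query(
    paste("SELECT sitename FROM sites WHERE id =", site_id),
    con
  )
  res$sitename
}

test_that("lookup_site_name returns the stubbed site name", {
  # The third argument to stub() is the value the stubbed call returns;
  # PEcAn.DB::db.query itself is never invoked.
  mockery::stub(lookup_site_name, "PEcAn.DB::db.query", data.frame(sitename = "US-1"))
  expect_equal(lookup_site_name(con = NULL, site_id = 5), "US-1")
})

When the third argument to `stub()` is a `mockery::mock()` object rather than a plain value (as in the `file.exists` stub in test.read.settings.R later in this diff), successive calls return successive values, and the number of calls can be asserted with `mockery::mock_calls()`.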
diff --git a/base/settings/tests/testthat/test.deprecated.settings.R b/base/settings/tests/testthat/test.fix.deprecated.settings.R similarity index 78% rename from base/settings/tests/testthat/test.deprecated.settings.R rename to base/settings/tests/testthat/test.fix.deprecated.settings.R index 8960ac14f37..2d40fa47719 100644 --- a/base/settings/tests/testthat/test.deprecated.settings.R +++ b/base/settings/tests/testthat/test.fix.deprecated.settings.R @@ -1,12 +1,3 @@ -#---------------------------------------------------------------------------- - -## Copyright (c) 2012 University of Illinois, NCSA. -## All rights reserved. This program and the accompanying materials -## are made available under the terms of the -## University of Illinois/NCSA Open Source License -## which accompanies this distribution, and is available at -## http://opensource.ncsa.illinois.edu/license.html -#---------------------------------------------------------------------------- PEcAn.logger::logger.setQuitOnSevere(FALSE) PEcAn.logger::logger.setLevel("OFF") context("fix.deprecated.settings") diff --git a/base/settings/tests/testthat/test.get_args.R b/base/settings/tests/testthat/test.get_args.R new file mode 100644 index 00000000000..a23cb12eae9 --- /dev/null +++ b/base/settings/tests/testthat/test.get_args.R @@ -0,0 +1,18 @@ +test_that("`get_args` throws an error with missing settings file", { + withr::with_envvar(c(PECAN_SETTINGS = "doesnotexists.xml"), { + expect_error( + get_args(), + "--settings \"doesnotexists.xml\" not a valid file" + ) + }) +}) + +test_that("`get_args` works for existing settings file", { + withr::with_envvar(c(PECAN_SETTINGS = "pecan.xml"), { + mockery::stub(get_args, 'file.exists', TRUE) + args <- get_args() + expect_equal(args$settings, "pecan.xml") + expect_equal(args$continue, FALSE) + expect_equal(args$help, FALSE) + }) +}) \ No newline at end of file diff --git a/base/settings/tests/testthat/test.loadPath_sitePFT.R b/base/settings/tests/testthat/test.loadPath_sitePFT.R new file mode 100644 index 00000000000..3eeec027b0d --- /dev/null +++ b/base/settings/tests/testthat/test.loadPath_sitePFT.R @@ -0,0 +1,35 @@ +test_that("`loadPath.sitePFT` gives no return value for file extensions other than csv and txt", { + settings <- list(host = "pecan") + path <- "base/settings.R" + expect_silent(loadPath.sitePFT(settings, path)) +}) + +test_that("`loadPath.sitePFT` gives an error for file with number of columns not equal to 2", { + withr::with_tempfile("tf", fileext = ".csv", { + settings <- list(host = "pecan") + df <- data.frame( + h1 = c("1", "2", "3"), + h2 = c("a", "b", "c"), + h3 = c("d", "e", "f") + ) + write.csv(df, tf, row.names = FALSE) + expect_error( + loadPath.sitePFT(settings, tf), + "file does not have two columns." 
+ ) + }) +}) + +test_that("`loadPath.sitePFT` works for correct format of input file",{ + withr::with_tempfile("tf", fileext = ".csv", { + settings <- list(host = "pecan") + df <- data.frame( + h1 = c("1", "2", "3"), + h2 = c("a", "b", "c") + ) + + write.csv(df, tf, row.names = FALSE) + links <- utils::read.table(tf, header = TRUE, sep = ",") + expect_equal(loadPath.sitePFT(settings, tf), `colnames<-`(links, c("site", "pft"))) + }) +}) \ No newline at end of file diff --git a/base/settings/tests/testthat/test.papply.R b/base/settings/tests/testthat/test.papply.R index dfe9a4fd721..bb53e591192 100644 --- a/base/settings/tests/testthat/test.papply.R +++ b/base/settings/tests/testthat/test.papply.R @@ -1,11 +1,3 @@ -#---------------------------------------------------------------------------- -## Copyright (c) 2012 University of Illinois, NCSA. -## All rights reserved. This program and the accompanying materials -## are made available under the terms of the -## University of Illinois/NCSA Open Source License -## which accompanies this distribution, and is available at -## http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------ context("test papply") diff --git a/base/settings/tests/testthat/test.pft_site_linker.R b/base/settings/tests/testthat/test.pft_site_linker.R new file mode 100644 index 00000000000..3126bbc8043 --- /dev/null +++ b/base/settings/tests/testthat/test.pft_site_linker.R @@ -0,0 +1,59 @@ +test_that("`site.pft.linkage` gives error for empty or incomplete lookup-table(LUT)", { + settings <- list() + expect_error( + site.pft.linkage(settings, NULL), + paste0( + "Your look up table should have two columns of site and pft", + " with site ids under site column and pft names under pft column." + ) + ) + + LUT <- data.frame(h1 = c("1000025731", "1000025731")) + + expect_error( + site.pft.linkage(settings, LUT), + paste0( + "Your look up table should have two columns of site and pft", + " with site ids under site column and pft names under pft column." + ) + ) +}) + +test_that("`site.pft.linkage` does not add site pft name when site id is not specified", { + settings <- list( + run = list( + site = list( + name = "test" + ) + ) + ) + LUT <- data.frame( + site = c("1000025731", "1000025732"), + pft = c("temperate.broadleaf.deciduous1", "temperate.needleleaf.evergreen") + ) + new_settings <- site.pft.linkage(settings, LUT) + expect_equal( + new_settings$run$site$site.pft, + NULL + ) +}) + +test_that("`site.pft.linkage` able to add site pft name if id is specified and is a part of the lookup-table(LUT)", { + settings <- list( + run = list( + site = list( + id = "1000025731" + ) + ) + ) + LUT <- data.frame( + site = c("1000025731", "1000025732"), + pft = c("temperate.broadleaf.deciduous1", "temperate.needleleaf.evergreen") + ) + + new_settings <- site.pft.linkage(settings, LUT) + expect_equal( + new_settings$run$site$site.pft$pft.name, + "temperate.broadleaf.deciduous1" + ) +}) \ No newline at end of file diff --git a/base/settings/tests/testthat/test.read.settings.R b/base/settings/tests/testthat/test.read.settings.R index 12b19f6f120..4911192c03f 100644 --- a/base/settings/tests/testthat/test.read.settings.R +++ b/base/settings/tests/testthat/test.read.settings.R @@ -1,12 +1,3 @@ -#---------------------------------------------------------------------------- - -## Copyright (c) 2012 University of Illinois, NCSA. -## All rights reserved. 
This program and the accompanying materials -##' are made available under the terms of the -##' University of Illinois/NCSA Open Source License -##' which accompanies this distribution, and is available at -##' http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------ context("tests for read.settings and related functions") PEcAn.logger::logger.setQuitOnSevere(FALSE) @@ -15,25 +6,56 @@ testdir <- tempfile() dir.create(testdir, showWarnings = FALSE) teardown(unlink(testdir, recursive = TRUE)) +test_that("`strip_comments()` function removes comments from nested lists", { + nestedList <- list( + "run", + "outdir", + list( + "database", + list( + "user", + comment = "A comment" + ) + ), + comment = "A comment" + ) + stripped_list <- strip_comments(nestedList) + expect_false("comment" %in% names(stripped_list)) + expect_false("comment" %in% names(stripped_list[[3]])) +}) test_that("read.settings() strips comments", { - s_comments <- read.settings("testsettings-comment.xml") - s <- read.settings("testsettings.xml") + s_comments <- read.settings("data/testsettings-comment.xml") + s <- read.settings("data/testsettings.xml") expect_equal(s_comments, s) }) test_that("read.settings() warns if named input file doesn't exist (but pecan.xml does)", { old_setting <- PEcAn.logger::logger.setLevel("DEBUG") + on.exit(PEcAn.logger::logger.setLevel(old_setting)) + + # this returns FALSE in the first call to the mock function, + # FALSE in the second call, and TRUE in the third call + m <- mockery::mock(FALSE, FALSE, TRUE) + mockery::stub(read.settings, 'file.exists', m) + mockery::stub( + read.settings, + 'XML::xmlParse', + "<pecan> + <outdir> + test + </outdir> + </pecan>") + #hacky way to check for errors b/c PEcAn.logger errors are non-standard and #not captured by testthat::expect_message() or expect_error() x <- capture.output( read.settings("blahblahblah.xml"), type = "message" ) + expect_equal(length(mockery::mock_calls(m)), 3) expect_true(any(grepl("WARN", x))) expect_true(any(grepl("blahblahblah.xml not found", x))) - - PEcAn.logger::logger.setLevel(old_setting) }) test_that("read settings returns error if no settings file found (#1124)", { diff --git a/base/settings/tests/testthat/test.site_pft_link_settings.R b/base/settings/tests/testthat/test.site_pft_link_settings.R new file mode 100644 index 00000000000..061a46122b9 --- /dev/null +++ b/base/settings/tests/testthat/test.site_pft_link_settings.R @@ -0,0 +1,24 @@ +test_that("`site.pft.link.settings` able to link sites to pfts and update settings accordingly", { + withr::with_tempfile("tf", fileext = ".csv", { + settings <- list( + host = "pecan", + run = list( + inputs = list( + pft.site = list( + path = tf + ) + ) + ) + ) + df <- data.frame( + site = c("1000025731", "1000025732"), + pft = c("temperate.broadleaf.deciduous1", "temperate.needleleaf.evergreen") + ) + write.csv(df, tf, row.names = FALSE) + updated_settings <- site.pft.link.settings(settings) + for (i in seq_along(updated_settings$pfts)) { + expect_equal(updated_settings$pfts[[i]]$name, df$pft[i]) + expect_equal(updated_settings$pfts$pft$constants, 1) + } + }) +}) \ No newline at end of file diff --git a/base/settings/tests/testthat/test.write.settings.R b/base/settings/tests/testthat/test.write.settings.R new file mode 100644 index 00000000000..fe54129af0b --- /dev/null +++ b/base/settings/tests/testthat/test.write.settings.R @@ -0,0 +1,13 @@ +test_that("`write.settings` able to write a
settings file based on input list", { + withr::with_tempfile("tf", fileext = ".xml", { + writeLines( + "<pecan> + <outdir>testdir</outdir> + </pecan>", + con = tf) + t <- XML::xmlToList(XML::xmlParse(tf)) + mockery::stub(write.settings, 'file.path', tf) + expect_equal(write.settings(t, tf), tf) + expect_equal(XML::xmlToList(XML::xmlParse(tf)), t) + }) +}) \ No newline at end of file diff --git a/base/utils/DESCRIPTION b/base/utils/DESCRIPTION index 7a6df1a73c4..72948b3d713 100644 --- a/base/utils/DESCRIPTION +++ b/base/utils/DESCRIPTION @@ -2,8 +2,7 @@ Package: PEcAn.utils Type: Package Title: PEcAn Functions Used for Ecological Forecasts and Reanalysis -Version: 1.7.2 -Date: 2021-10-04 +Version: 1.8.0.9000 Authors@R: c(person("Mike", "Dietze", role = c("aut"), email = "dietze@bu.edu"), person("Rob", "Kooper", role = c("aut", "cre"), @@ -48,13 +47,15 @@ Suggests: data.table, ggplot2, MASS, + mockery, randtoolbox, rjags, testthat (>= 2.0.0), + withr, xtable License: BSD_3_clause + file LICENSE Copyright: Authors LazyData: true Encoding: UTF-8 -RoxygenNote: 7.2.3 +RoxygenNote: 7.3.2 Roxygen: list(markdown = TRUE) diff --git a/base/utils/LICENSE b/base/utils/LICENSE index 9e38c2dc685..09ef35a60b4 100644 --- a/base/utils/LICENSE +++ b/base/utils/LICENSE @@ -1,29 +1,3 @@ -University of Illinois/NCSA Open Source License - -Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal with the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -- Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. -- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. -- Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE.
- +YEAR: 2024 +COPYRIGHT HOLDER: PEcAn Project +ORGANIZATION: PEcAn Project, authors affiliations diff --git a/base/utils/NAMESPACE b/base/utils/NAMESPACE index 300bd155fbb..2f8d208fcec 100644 --- a/base/utils/NAMESPACE +++ b/base/utils/NAMESPACE @@ -60,6 +60,7 @@ export(ud_convert) export(unit_is_parseable) export(units_are_equivalent) export(vecpaste) +export(zero.bounded.density) export(zero.truncate) importFrom(magrittr,"%>%") importFrom(rlang,.data) diff --git a/base/utils/NEWS.md b/base/utils/NEWS.md index 165f61a8ebb..05aa69b5d67 100644 --- a/base/utils/NEWS.md +++ b/base/utils/NEWS.md @@ -1,7 +1,24 @@ -# PEcAn.DB 1.7.2.9000 +# PEcAn.utils 1.8.0.9000 + +## License change +* PEcAn.utils is now distributed under the BSD three-clause license instead of the NCSA Open Source license. + +## Added +* utility function `zero.bounded.density` is now exported. + + +# PEcAn.utils 1.8.0 ## Added * Shifted `convert.input` function from `PEcAn.utils` to `PEcAn.DB` with a new name `convert_input`to remove circular dependency. (#3026; @nanu1605) * Added a stub function `convert.input`. (#3026; @nanu1605) +* Updated unit conversions throughout PEcAn to use the `units` R package instead of the unmaintained `udunits2`. Note that both `units` and `udunits2` interface with the same underlying compiled code, so the `udunits2` *system library* is still required. (#2989; @nanu1605) +* Fixed a bug in `ud_convert()` where it failed with objects of class "difftime" introduced by refactoring to use the `units` package instead of `udunits` (#3012) + +# PEcAn.utils 1.7.1 + +* All changes in 1.7.1 and earlier were recorded in a single file for all of + the PEcAn packages; please see + https://github.com/PecanProject/pecan/blob/v1.7.1/CHANGELOG.md for details. \ No newline at end of file diff --git a/base/utils/R/Defunct.R b/base/utils/R/Defunct.R index e74d3e3c425..4533ce75891 100644 --- a/base/utils/R/Defunct.R +++ b/base/utils/R/Defunct.R @@ -1,5 +1,6 @@ -#' @title Defunct functions in PEcAn.utils -#' @description The functions listed below are defunct and have been removed from the package. +#' Defunct functions in PEcAn.utils +#' +#' The functions listed below are defunct and have been removed from the package. #' Calling them will produce a message indicating what function, if any, has replaced it. #' #' @name PEcAn.utils-defunct diff --git a/base/utils/R/cf2date.R b/base/utils/R/cf2date.R index bef5fb9d795..79380c8fc50 100644 --- a/base/utils/R/cf2date.R +++ b/base/utils/R/cf2date.R @@ -81,4 +81,4 @@ datetime2doy <- function(datetime, tz = "UTC") { #' #' @author Alexey Shiklomanov #' -cf2doy <- function(value, unit, tz = "UTC") datetime2doy(cf2datetime(value, unit, tz), tz) \ No newline at end of file +cf2doy <- function(value, unit, tz = "UTC") datetime2doy(cf2datetime(value, unit, tz), tz) diff --git a/base/utils/R/clear.scratch.R b/base/utils/R/clear.scratch.R index afc37df0221..924f8e3d149 100644 --- a/base/utils/R/clear.scratch.R +++ b/base/utils/R/clear.scratch.R @@ -1,24 +1,14 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- -##' Removes previous model run output from worker node local scratch directories on EBI-CLUSTER -##' -##' @title Clear EBI-CLUSTER worker node local scratch directories of old PEcAn output -##' @name clear.scratch -##' @author Shawn Serbin -##' @param settings list of PEcAn settings. Only \code{settings$host$name} is used -##' @return nothing -##' @export -##' @examples -##' \dontrun{ -##' clear.scratch(settings) -##' } +#' Removes previous model run output from worker node local scratch directories on EBI-CLUSTER +#' +#' @author Shawn Serbin +#' @param settings list of PEcAn settings. Only \code{settings$host$name} is used +#' @return nothing +#' @export +#' @examples +#' \dontrun{ +#' clear.scratch(settings) +#' } clear.scratch <- function(settings) { ### Setup script diff --git a/base/utils/R/datasets.R b/base/utils/R/datasets.R index bc3dd073564..38f3e27a92c 100644 --- a/base/utils/R/datasets.R +++ b/base/utils/R/datasets.R @@ -13,7 +13,7 @@ #' @format data frame, all columns character #' \describe{ #' \item{Variable.Name}{Short name suitable for programming with} -#' \item{standard_name}{Name used in the NetCDF \href{http://cfconventions.org/standard-names.html}{CF metadata conventions} } +#' \item{standard_name}{Name used in the NetCDF \href{http://cfconventions.org/Data/cf-standard-names/current/build/cf-standard-name-table.html}{CF metadata conventions} } #' \item{Units}{Standard units for this variable. Do not call variables by these names if they are in different units. #' See `ud_convert` for conversions to and from non-standard units} #' \item{Long.Name}{Human-readable variable name, suitable for e.g. 
axis labels} diff --git a/base/utils/R/distn.stats.R b/base/utils/R/distn.stats.R index 6cf66acaac5..f8ee4ddd1df 100644 --- a/base/utils/R/distn.stats.R +++ b/base/utils/R/distn.stats.R @@ -1,15 +1,16 @@ -##' Implementation of standard equations used to calculate mean and sd for a variety of -##' named distributions different -##' -##' @title Distribution Stats -##' @param distn named distribution, one of 'beta', 'exp', 'f', 'gamma', 'lnorm', 'norm', 't', -##' @param a numeric; first parameter of \code{distn} -##' @param b numeric; second parameter of \code{distn} -##' @return vector with mean and standard deviation -##' @export -##' @author David LeBauer -##' @examples -##' distn.stats('norm', 0, 1) +#' Distribution Stats +#' +#' Implementation of standard equations used to calculate mean and sd for a variety of +#' named distributions +#' +#' @param distn named distribution, one of 'beta', 'exp', 'f', 'gamma', 'lnorm', 'norm', 't' +#' @param a numeric; first parameter of \code{distn} +#' @param b numeric; second parameter of \code{distn} +#' @return vector with mean and standard deviation +#' @export +#' @author David LeBauer +#' @examples +#' distn.stats('norm', 0, 1) distn.stats <- function(distn, a, b) { mean <- sd <- NULL if (distn == "beta") { @@ -44,14 +45,15 @@ distn.stats <- function(distn, a, b) { } # distn.stats -##' a helper function for computing summary statistics of a parametric distribution -##' -##' @title return mean and standard deviation of a distribution for each distribution in a table with \code{colnames = c('distn', 'a', 'b')}, -##' e.g. in a table of priors -##' @param distns table of distributions; see examples -##' @return named vector of mean and SD -##' @export -##' @author David LeBauer +#' Helper function for computing summary statistics of a parametric distribution +#' +#' Return mean and standard deviation of a distribution for each distribution in a table with \code{colnames = c('distn', 'a', 'b')}, +#' e.g. in a table of priors +#' +#' @param distns table of distributions; see examples +#' @return named vector of mean and SD +#' @export +#' @author David LeBauer distn.table.stats <- function(distns) { y <- as.data.frame(matrix(NA, nrow(distns), 2)) for (i in seq_len(nrow(distns))) { diff --git a/base/utils/R/download.url.R b/base/utils/R/download.url.R index 557493ee80c..06561891332 100644 --- a/base/utils/R/download.url.R +++ b/base/utils/R/download.url.R @@ -1,25 +1,23 @@ -##' Try and download a file. -##' -##' This will download a file, if retry is set and 404 is returned it will -##' wait until the file is available. If the file is still not available -##' after timeout tries, it will return NA. If the file is downloaded -##' it will return the name of the file -##' -##' @name download.url -##' @title Download file from the url. -##' @export -##' @param url the url of the file to download -##' @param file the filename -##' @param timeout number of seconds to wait for file (default 600) -##' @param .opts list of options for curl, for example to download from a -##' protected site use list(userpwd=userpass, httpauth = 1L) -##' @param retry retry if url not found yet, this is used by Brown Dog -##' @return returns name of file if successful or NA if not. -##' -##' @examples -##' \dontrun{ -##' download.url('http://localhost/', index.html) -##' } +#' Try and download a file. +#' +#' This will download a file; if retry is set and 404 is returned, it will +#' wait until the file is available.
If the file is still not available +#' after timeout tries, it will return NA. If the file is downloaded, +#' it will return the name of the file. +#' +#' @export +#' @param url the url of the file to download +#' @param file the filename +#' @param timeout number of seconds to wait for file (default 600) +#' @param .opts list of options for curl, for example to download from a +#' protected site use list(userpwd=userpass, httpauth = 1L) +#' @param retry retry if url not found yet, this is used by Brown Dog +#' @return returns name of file if successful or NA if not. +#' +#' @examples +#' \dontrun{ +#' download.url('http://localhost/', 'index.html') +#' } download.url <- function(url, file, timeout = 600, .opts = list(), retry = TRUE) { count <- 0 while (retry && !url_found(url) && count < timeout) { diff --git a/base/utils/R/full.path.R b/base/utils/R/full.path.R index 7a0b681cdb0..1221a81513a 100644 --- a/base/utils/R/full.path.R +++ b/base/utils/R/full.path.R @@ -1,26 +1,16 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- -##' Creates an absolute path to a folder. -##' -##' This will take a folder and make it into an absolute folder name. It -##' will normalize the path and prepend it with the current working folder -##' if needed to get an absolute path name. -##' -##' @title Creates an absolute path to a folder -##' @name full.path -##' @param folder folder for file paths. -##' @author Rob Kooper -##' @return absolute path -##' @export -##' @examples -##' full.path('pecan') +#' Creates an absolute path to a folder. +#' +#' This will take a folder and make it into an absolute folder name. It +#' will normalize the path and prepend it with the current working folder +#' if needed to get an absolute path name. +#' +#' @param folder folder for file paths. +#' @author Rob Kooper +#' @return absolute path +#' @export +#' @examples +#' full.path('pecan') full.path <- function(folder) { # normalize pathname folder <- normalizePath(folder, mustWork = FALSE) diff --git a/base/utils/R/get.ensemble.inputs.R b/base/utils/R/get.ensemble.inputs.R index bcc7a5553ea..3377dd507ea 100644 --- a/base/utils/R/get.ensemble.inputs.R +++ b/base/utils/R/get.ensemble.inputs.R @@ -1,14 +1,16 @@ ## split clim file into smaller time units to use in KF -##' @title get.ensemble.inputs -##' @name get.ensemble.inputs -##' @author Mike Dietze and Ann Raiho -##' -##' @param settings PEcAn settings list -##' @param ens ensemble number. default = 1 -##' @description Splits climate met for SIPNET -##' -##' @return find correct ensemble inputs -##' @export + +#' get.ensemble.inputs +#' +#' Splits climate met for SIPNET +#' +#' @author Mike Dietze and Ann Raiho +#' +#' @param settings PEcAn settings list +#' @param ens ensemble number.
default = 1 +#' +#' @return find correct ensemble inputs +#' @export get.ensemble.inputs <- function(settings, ens = 1){ diff --git a/base/utils/R/help.R b/base/utils/R/help.R index 247c0c697dd..175d8f302ef 100644 --- a/base/utils/R/help.R +++ b/base/utils/R/help.R @@ -1,37 +1,43 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -##' R package to support PEcAn, the Predictive Ecosystem Analyzer -##' -##' Instructions for the use of this package are provided in the project documentation \url{https://pecan.gitbooks.io/pecan-documentation/content/}. -##' -##' Project homepage: \url{pecanproject.org} -##' -##' Description of PEcAn -##' -##' The Predictive Ecosystem Analyzer (PEcAn) is a scientific workflow management tool that is designed to simplify the management of model parameterization, execution, and analysis. The goal of PEcAn is to streamline the interaction between data and models, and to improve the efficacy of scientific investigation. PEcAn is an open source utility that encapsulates: -##' -##' 1. acquisition of meteorological inputs -##' 2. synthesis of physiological trait data as the posterior distribution of a Bayesian meta-analysis -##' 3. sampling trait meta-analysis posterior distributions to parameterize ensembles of ED2 and other ecophysiological models -##' 4. probabilistic forecasts -##' 5. postprocessing to constrain forecasts and model parameters with field, meterological, eddy flux, and spectral data, and -##' 6. provenance tracking -##' -##' PECAn integrates available data into ecological forecasts by running ensembles of a terrestrial ecosystem model that is parameterized by the posterior distribution from a meta-analysis of available plant trait data. -##' These trait data are assembled from field research and primary literature, and are stored in a PostgreSQL database. Current development focused on biofuel crops uses BETYdb. -##' In addition to generating forecasts that reflect available data, PEcAn quantifies the contribution of each parameter to model uncertainty. -##' This information informs targeted data collection and synthesis efforts that most efficiently reduce forecast uncertainty. -##' -##' Current development is focused on developing PEcAn into a real-time data assimilation and forecasting system. This system will provide a detailed analysis of the past and present ecosystem functioning that seamlessly transitions into forecasts. -##' -##' @docType package -##' @name PEcAn -##' @aliases PECAn pecan package-pecan -NULL +#' R package to support PEcAn, the Predictive Ecosystem Analyzer +#' +#' Instructions for the use of this package are provided in the project +#' documentation \url{https://pecanproject.github.io/documentation.html}. +#' +#' Project homepage: \url{pecanproject.org} +#' +#' Description of PEcAn +#' +#' The Predictive Ecosystem Analyzer (PEcAn) is a scientific workflow management +#' tool that is designed to simplify the management of model parameterization, +#' execution, and analysis. 
The goal of PEcAn is to streamline the interaction +#' between data and models, and to improve the efficacy of scientific +#' investigation. PEcAn is an open source utility that encapsulates: +#' +#' 1. acquisition of meteorological inputs +#' 2. synthesis of physiological trait data as the posterior distribution of a +#' Bayesian meta-analysis +#' 3. sampling trait meta-analysis posterior distributions to parameterize +#' ensembles of ED2 and other ecophysiological models +#' 4. probabilistic forecasts +#' 5. postprocessing to constrain forecasts and model parameters with field, +#' meteorological, eddy flux, and spectral data, and +#' 6. provenance tracking +#' +#' PEcAn integrates available data into ecological forecasts by running +#' ensembles of a terrestrial ecosystem model that is parameterized by the +#' posterior distribution from a meta-analysis of available plant trait data. +#' These trait data are assembled from field research and primary literature, +#' and are stored in a PostgreSQL database. Current development focused on +#' biofuel crops uses BETYdb. In addition to generating forecasts that reflect +#' available data, PEcAn quantifies the contribution of each parameter to model +#' uncertainty. This information informs targeted data collection and synthesis +#' efforts that most efficiently reduce forecast uncertainty. +#' +#' Current development is focused on developing PEcAn into a real-time data +#' assimilation and forecasting system. This system will provide a detailed +#' analysis of the past and present ecosystem functioning that seamlessly +#' transitions into forecasts. +#' +#' @name PEcAn +#' @aliases PECAn pecan package-pecan +"_PACKAGE" diff --git a/base/utils/R/listToArgString.R b/base/utils/R/listToArgString.R index 68d56d599d2..880c07d0007 100644 --- a/base/utils/R/listToArgString.R +++ b/base/utils/R/listToArgString.R @@ -1,11 +1,11 @@ -##' format a list of arguments as one comma-separated string -##' -##' @export -##' -##' @param l a named list of function arguments -##' @return A string containing named argument/value pairs separated by commas -##' -##' @author Ryan Kelly +#' format a list of arguments as one comma-separated string +#' +#' @export +#' +#' @param l a named list of function arguments +#' @return A string containing named argument/value pairs separated by commas +#' +#' @author Ryan Kelly ## This little utility is used in a few places in data.atmosphere. listToArgString <- function(l) { arg.string <- "" diff --git a/base/utils/R/mail.R b/base/utils/R/mail.R index 82d22390644..35735aab676 100644 --- a/base/utils/R/mail.R +++ b/base/utils/R/mail.R @@ -1,25 +1,17 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- -##' Sends email. This assumes the program sendmail is installed.
-##' -##' @param from the sender of the mail message -##' @param to the receipient of the mail message -##' @param subject the subject of the mail message -##' @param body the body of the mail message -##' @author Rob Kooper -##' @return nothing -##' @export -##' @examples -##' \dontrun{ -##' sendmail('bob@@example.com', 'joe@@example.com', 'Hi', 'This is R.') -##' } +#' Sends email. This assumes the program sendmail is installed. +#' +#' @param from the sender of the mail message +#' @param to the recipient of the mail message +#' @param subject the subject of the mail message +#' @param body the body of the mail message +#' @author Rob Kooper +#' @return nothing +#' @export +#' @examples +#' \dontrun{ +#' sendmail('bob@@example.com', 'joe@@example.com', 'Hi', 'This is R.') +#' } sendmail <- function(from, to, subject, body) { if (is.null(to)) { PEcAn.logger::logger.error("No receipient specified, mail is not send.") @@ -32,10 +24,9 @@ sendmail <- function(from, to, subject, body) { cat(paste0("From: ", from, "\n", "Subject: ", subject, "\n", "To: ", to, "\n", "\n", - body), file = mailfile) + body, "\n"), file = mailfile) system2(sendmail, c("-f", paste0("\"", from, "\""), paste0("\"", to, "\""), "<", mailfile)) unlink(mailfile) } } # sendmail - diff --git a/base/utils/R/mcmc.list2init.R b/base/utils/R/mcmc.list2init.R index 73ee039d0c8..b4571181aa8 100644 --- a/base/utils/R/mcmc.list2init.R +++ b/base/utils/R/mcmc.list2init.R @@ -30,7 +30,7 @@ mcmc.list2init <- function(dat) { ## define variables ic <- list() - n <- nrow(dat[[1]]) + nr <- nrow(dat[[1]]) nc <- coda::nchain(dat) for(c in seq_len(nc)) ic[[c]] <- list() diff --git a/base/utils/R/n_leap_day.R b/base/utils/R/n_leap_day.R index 0bbdae568e2..fffa28c3f09 100644 --- a/base/utils/R/n_leap_day.R +++ b/base/utils/R/n_leap_day.R @@ -1,9 +1,9 @@ -##' @name n_leap_day -##' @title n_leap_day -##' @description number of leap days between two dates -##' @author Mike Dietze -##' @param start_date,end_date dates in any format recognized by \code{\link[base]{as.Date}} -##' @export +#' n_leap_day +#' +#' number of leap days between two dates +#' @author Mike Dietze +#' @param start_date,end_date dates in any format recognized by \code{\link[base]{as.Date}} +#' @export n_leap_day <- function(start_date, end_date) { ## make sure dates are formatted correctly diff --git a/base/utils/R/r2bugs.distributions.R b/base/utils/R/r2bugs.distributions.R index 0e832c3183d..1ec86414388 100644 --- a/base/utils/R/r2bugs.distributions.R +++ b/base/utils/R/r2bugs.distributions.R @@ -1,26 +1,17 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -##' convert R parameterizations to BUGS paramaterizations -##' -##' R and BUGS have different parameterizations for some distributions. This function transforms the distributions from R defaults to BUGS defaults. BUGS is an implementation of the BUGS language, and these transformations are expected to work for bugs.
-##' @title convert R parameterizations to BUGS paramaterizations -##' @param priors data.frame with columns distn = distribution name, parama, paramb using R default parameterizations. -##' @param direction One of "r2bugs" or "bugs2r" -##' @return priors dataframe using JAGS default parameterizations -##' @author David LeBauer, Ben Bolker -##' @export -##' @examples -##' priors <- data.frame(distn = c('weibull', 'lnorm', 'norm', 'gamma'), -##' parama = c(1, 1, 1, 1), -##' paramb = c(2, 2, 2, 2)) -##' r2bugs.distributions(priors) +#' convert R parameterizations to BUGS parameterizations +#' +#' R and BUGS have different parameterizations for some distributions. This function transforms the distributions from R defaults to BUGS defaults. JAGS is an implementation of the BUGS language, and these transformations are expected to work for JAGS. +#' +#' @param priors data.frame with columns distn = distribution name, parama, paramb using R default parameterizations. +#' @param direction One of "r2bugs" or "bugs2r" +#' @return priors dataframe using JAGS default parameterizations +#' @author David LeBauer, Ben Bolker +#' @export +#' @examples +#' priors <- data.frame(distn = c('weibull', 'lnorm', 'norm', 'gamma'), +#' parama = c(1, 1, 1, 1), +#' paramb = c(2, 2, 2, 2)) +#' r2bugs.distributions(priors) r2bugs.distributions <- function(priors, direction = "r2bugs") { priors$distn <- as.character(priors$distn) @@ -76,19 +67,19 @@ bugs2r.distributions <- function(..., direction = "bugs2r") { } # bugs2r.distributions -##' Sample from an R distribution using JAGS -##' -##' Takes a distribution with R parameterization, converts it to a -##' BUGS parameterization, and then samples from the distribution using -##' JAGS -##' @title bugs.rdist -##' @param prior dataframe with distribution name and parameters -##' @param n.iter number of MCMC samples. Output will have n.iter/4 samples -##' @param n number of randomly chosen samples to return. +#' Sample from an R distribution using JAGS +#' +#' Takes a distribution with R parameterization, converts it to a +#' BUGS parameterization, and then samples from the distribution using +#' JAGS +#' +#' @param prior dataframe with distribution name and parameters +#' @param n.iter number of MCMC samples. Output will have n.iter/4 samples +#' @param n number of randomly chosen samples to return. ## If NULL, returns all n.iter/4 of them +#' @return vector of samples +#' @export +#' @author David LeBauer bugs.rdist <- function(prior = data.frame(distn = "norm", parama = 0, paramb = 1), n.iter = 1e+05, n = NULL) { need_packages("rjags") diff --git a/base/utils/R/read.output.R b/base/utils/R/read.output.R index 2929c3cb76b..a641b113aa3 100644 --- a/base/utils/R/read.output.R +++ b/base/utils/R/read.output.R @@ -1,62 +1,54 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -##' Read model output -##' -##' Reads the output of a single model run -##' -##' Generic function to convert model output from model-specific format to -##' a common PEcAn format.
This function uses MsTMIP variables except that units of -##' (kg m-2 d-1) are converted to kg ha-1 y-1. Currently this function converts -##' Carbon fluxes: GPP, NPP, NEE, TotalResp, AutoResp, HeteroResp, -##' DOC_flux, Fire_flux, and Stem (Stem is specific to the BioCro model) -##' and Water fluxes: Evaporation (Evap), Transpiration (TVeg), -##' surface runoff (Qs), subsurface runoff (Qsb), and rainfall (Rainf). -##' For more details, see the [MsTMIP -##' variables](http://nacp.ornl.gov/MsTMIP_variables.shtml) -##' documentation. -##' -##' @param runid the ID distinguishing the model run. Can be omitted -##' if `ncfiles` is set. -##' @param outdir the directory that the model's output was sent to. -##' Can be omitted if `ncfiles` is set. -##' @param variables Character vector of variables to be read from -##' model output. Default = `"GPP"`. If `NULL`, try to read all -##' variables in output file.. -##' @param dataframe Logical: if TRUE, will return output in a -##' `data.frame` format with a posix column. Useful for -##' `PEcAn.benchmark::align.data` and plotting. -##' @param pft.name character string, name of the plant functional -##' type (PFT) to read PFT-specific output. If `NULL` no -##' PFT-specific output will be read even the variable has PFT as a -##' dimension. -##' @param ncfiles Custom character vector of full paths to NetCDF -##' files. If `NULL` (default), this list is constructed -##' automatically by looking for `YYYY.nc` files in -##' `file.path(outdir, runid)`. -##' @param verbose Logical. If `TRUE`, print status as every year and -##' variable is read, as well as all NetCDF diagnostics (from -##' `verbose` argument to, e.g., [ncdf4::nc_open()]) (default = -##' `FALSE`). -##' @param print_summary Logical. If `TRUE` (default), calculate and -##' print a summary of the means of each variable for each year. -##' @param start.year,end.year first and last year of output to read. -##' Specify as a date-time (only the year portion is used) or as a -##' four-digit number or string. If `NA`, reads all years found in -##' `outdir`. -##' @return If `dataframe = FALSE`, a vector of output variables. If -##' `dataframe = TRUE`, a `data.frame` of output variables with -##' POSIXct timestamps added (`posix` column). The `posix` column -##' is in seconds after January 1 of `start.year`, or 1970 if -##' `start.year` is not provided. -##' @export -##' @author Michael Dietze, David LeBauer, Alexey Shiklomanov +#' Read model output +#' +#' Reads the output of a single model run +#' +#' Generic function to convert model output from model-specific format to +#' a common PEcAn format. This function uses MsTMIP variables except that units of +#' (kg m-2 d-1) are converted to kg ha-1 y-1. Currently this function converts +#' Carbon fluxes: GPP, NPP, NEE, TotalResp, AutoResp, HeteroResp, +#' DOC_flux, Fire_flux, and Stem (Stem is specific to the BioCro model) +#' and Water fluxes: Evaporation (Evap), Transpiration (TVeg), +#' surface runoff (Qs), subsurface runoff (Qsb), and rainfall (Rainf). +#' +#' For more details, see the [MsTMIP +#' variables](http://nacp.ornl.gov/MsTMIP_variables.shtml) +#' documentation. +#' +#' @param runid the ID distinguishing the model run. Can be omitted +#' if `ncfiles` is set. +#' @param outdir the directory that the model's output was sent to. +#' Can be omitted if `ncfiles` is set. +#' @param variables Character vector of variables to be read from +#' model output. Default = `"GPP"`. If `NULL`, try to read all +#' variables in output file.
+#' @param dataframe Logical: if TRUE, will return output in a
+#' `data.frame` format with a posix column. Useful for
+#' `PEcAn.benchmark::align.data` and plotting.
+#' @param pft.name character string, name of the plant functional
+#' type (PFT) to read PFT-specific output. If `NULL` no
+#' PFT-specific output will be read even if the variable has PFT as a
+#' dimension.
+#' @param ncfiles Custom character vector of full paths to NetCDF
+#' files. If `NULL` (default), this list is constructed
+#' automatically by looking for `YYYY.nc` files in
+#' `file.path(outdir, runid)`.
+#' @param verbose Logical. If `TRUE`, print status as every year and
+#' variable is read, as well as all NetCDF diagnostics (from
+#' `verbose` argument to, e.g., [ncdf4::nc_open()]) (default =
+#' `FALSE`).
+#' @param print_summary Logical. If `TRUE` (default), calculate and
+#' print a summary of the means of each variable for each year.
+#' @param start.year,end.year first and last year of output to read.
+#' Specify as a date-time (only the year portion is used) or as a
+#' four-digit number or string. If `NA`, reads all years found in
+#' `outdir`.
+#' @return If `dataframe = FALSE`, a vector of output variables. If
+#' `dataframe = TRUE`, a `data.frame` of output variables with
+#' POSIXct timestamps added (`posix` column). The `posix` column
+#' is in seconds after January 1 of `start.year`, or 1970 if
+#' `start.year` is not provided.
+#' @export
+#' @author Michael Dietze, David LeBauer, Alexey Shiklomanov
 read.output <- function(runid, outdir,
                         start.year = NA,
                         end.year = NA,
diff --git a/base/utils/R/remove.config.R b/base/utils/R/remove.config.R
index 2f40a353fa1..b9407fdb90d 100644
--- a/base/utils/R/remove.config.R
+++ b/base/utils/R/remove.config.R
@@ -1,12 +1,3 @@
-#-------------------------------------------------------------------------------
-# Copyright (c) 2012 University of Illinois, NCSA.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the
-# University of Illinois/NCSA Open Source License
-# which accompanies this distribution, and is available at
-# http://opensource.ncsa.illinois.edu/license.html
-#-------------------------------------------------------------------------------
-
 remove.config <- function(dir, settings, model) {
   fcn.name <- paste0("remove.config.", model)
diff --git a/base/utils/R/status.R b/base/utils/R/status.R
index 868be2d091f..41e340d0ebf 100644
--- a/base/utils/R/status.R
+++ b/base/utils/R/status.R
@@ -7,8 +7,7 @@
 #' Records the progress of a PEcAn workflow by writing statuses and timestamps
 #' to a STATUS file. Use these each time a module starts, finishes,
 #' or is skipped.
-
-#' @details
+#'
 #' All of these functions write to or read from a STATUS file in your run's
 #' output directory. If the file is not specified in the call, they will look
 #' for a `settings` object in the global environment and use
diff --git a/base/utils/R/timezone_hour.R b/base/utils/R/timezone_hour.R
index 0ea9a63b0af..b61c450d817 100644
--- a/base/utils/R/timezone_hour.R
+++ b/base/utils/R/timezone_hour.R
@@ -1,14 +1,14 @@
-##' @name timezone_hour
-##' @title timezone_hour
-##' @description return the number of hours offset to UTC for a timezone.
-##' @author Rob Kooper
-##' @param timezone to be converted
-##' @return hours offset of the timezone
-##' @examples
-##' \dontrun{
-##' timezone_hour('America/New_York')
-##' }
-##' @export
+#' Timezone Hour
+#'
+#' Returns the number of hours offset to UTC for a timezone.
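+#'
+#' For example, `timezone_hour('America/New_York')` is expected to return
+#' -5 (the standard-time offset; the exact value may depend on the system
+#' time zone database).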
+#' @author Rob Kooper
+#' @param timezone to be converted
+#' @return hours offset of the timezone
+#' @examples
+#' \dontrun{
+#' timezone_hour('America/New_York')
+#' }
+#' @export
 timezone_hour <- function(timezone) {
   if (is.numeric(timezone)) {
     return(timezone)
diff --git a/base/utils/R/to_nc.R b/base/utils/R/to_nc.R
index 6b244be05c0..9ee1d38e047 100644
--- a/base/utils/R/to_nc.R
+++ b/base/utils/R/to_nc.R
@@ -1,12 +1,12 @@
-##' Make some values into an NCDF dimension variable
-##'
-##' Units and longnames are looked up from the \code{\link{standard_vars}} table
-##' @export
-##'
-##' @param dimname character vector, standard dimension name (must be in PEcAn.utils::standard_vars)
-##' @param vals values of dimension; can be single value or vector
-##' @return ncdim defined according to standard_vars
-##' @author Anne Thomas
+#' Make some values into an NCDF dimension variable
+#'
+#' Units and longnames are looked up from the \code{\link{standard_vars}} table
+#' @export
+#'
+#' @param dimname character vector, standard dimension name (must be in PEcAn.utils::standard_vars)
+#' @param vals values of dimension; can be single value or vector
+#' @return ncdim defined according to standard_vars
+#' @author Anne Thomas
 to_ncdim <- function(dimname,vals){
   dim <- PEcAn.utils::standard_vars[which(PEcAn.utils::standard_vars$Variable.Name == dimname),]
   #check dim exists
@@ -30,14 +30,14 @@ to_ncdim <- function(dimname,vals){
 } #to_ncdim
-##' Define an NCDF variable
-##'
-##' @export
-##'
-##' @param varname character vector, standard variable name (must be in PEcAn.utils::standard_vars)
-##' @param dims list of previously defined ncdims (function will match subset of dims for this variable in standard_vars; can include other dims--enables lapply.)
-##' @return ncvar defined according to standard_vars
-##' @author Anne Thomas
+#' Define an NCDF variable
+#'
+#' @export
+#'
+#' @param varname character vector, standard variable name (must be in PEcAn.utils::standard_vars)
+#' @param dims list of previously defined ncdims (the function will match the subset of dims needed for this variable in standard_vars; the list can include other dims, which enables use with lapply)
+#' @return ncvar defined according to standard_vars
+#' @author Anne Thomas
 to_ncvar <- function(varname,dims){
   nc_var <- PEcAn.utils::standard_vars[which(PEcAn.utils::standard_vars$Variable.Name == varname),]
   #check nc_var exists
diff --git a/base/utils/R/transformstats.R b/base/utils/R/transformstats.R
index d128f0d785a..6c3bf7e4bc3 100644
--- a/base/utils/R/transformstats.R
+++ b/base/utils/R/transformstats.R
@@ -1,22 +1,22 @@
-##' Transform misc. statistics to SE
-##'
-##' Automates transformations of SD, MSE, LSD, 95%CI, HSD, and MSD
-##' to conservative estimates of SE.
-##' Method details and assumptions described in
-##' LeBauer 2020 Transforming ANOVA and Regression statistics for Meta-analysis.
-##' Authorea. DOI: https://doi.org/10.22541/au.158359749.96662550
-##' @param data data frame with columns for mean, statistic, n,
-##' and statistic name
-##' @return data frame with statistics transformed to SE
-##' @author David LeBauer
-##' @export
-##' @examples
-##' statdf <- data.frame(Y=rep(1,5),
-##'                      stat=rep(1,5),
-##'                      n=rep(4,5),
-##'                      statname=c('SD', 'MSE', 'LSD', 'HSD', 'MSD'))
-##' transformstats(statdf)
+#' Transform misc. statistics to SE
+#'
+#' Automates transformations of SD, MSE, LSD, 95%CI, HSD, and MSD
+#' to conservative estimates of SE.
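+#' For example, a reported standard deviation is converted to a standard
+#' error as SE = SD / sqrt(n).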
+#' Method details and assumptions described in
+#' LeBauer 2020 Transforming ANOVA and Regression statistics for Meta-analysis.
+#' Authorea. DOI: https://doi.org/10.22541/au.158359749.96662550
+#' @param data data frame with columns for mean, statistic, n,
+#' and statistic name
+#' @return data frame with statistics transformed to SE
+#' @author David LeBauer
+#' @export
+#' @examples
+#' statdf <- data.frame(Y=rep(1,5),
+#'                      stat=rep(1,5),
+#'                      n=rep(4,5),
+#'                      statname=c('SD', 'MSE', 'LSD', 'HSD', 'MSD'))
+#' transformstats(statdf)
 transformstats <- function(data) {
   if (is.factor(data$statname) && !"SE" %in% levels(data$statname)) {
     data$statname <- factor(
diff --git a/base/utils/R/ud_convert.R b/base/utils/R/ud_convert.R
index affc7bb4836..ee96bf60cd9 100644
--- a/base/utils/R/ud_convert.R
+++ b/base/utils/R/ud_convert.R
@@ -1,17 +1,26 @@
-##' Convert units
-##'
-##' Unit conversion to replace the now-unmaintained `udunits2::ud.convert`
-##' @author Chris Black
-##'
-##' @param x numeric vector
-##' @param u1 string parseable as the units in which `x` is provided
-##' @param u2 string parseable as the units to convert to
-##'
-##' @return numeric vector with values converted to units in `u2`
-##' @export
+#' Convert units
+#'
+#' Unit conversion to replace the now-unmaintained `udunits2::ud.convert`
+#' @author Chris Black
+#'
+#' @param x vector of class "numeric" or "difftime"
+#' @param u1 string parseable as the units in which `x` is provided. If `x` is
+#'   class "difftime", then `u1` is not actually used. However, it still needs
+#'   to be supplied and needs to be convertible to `u2` for consistency.
+#' @param u2 string parseable as the units to convert to
+#'
+#' @return numeric vector with values converted to units in `u2`
+#' @export
 ud_convert <- function(x, u1, u2) {
   stopifnot(units::ud_are_convertible(u1, u2))
-  x1 <- units::set_units(x, value = u1, mode = "standard")
+  if(inherits(x, "difftime")) {
+    x1 <- units::as_units(x)
+    if(units(x1) != units(units::as_units(u1))) {
+      warning("Units of `x` don't match `u1`, using '", units::deparse_unit(x1), "' instead")
+    }
+  } else {
+    x1 <- units::set_units(x, value = u1, mode = "standard")
+  }
   x2 <- units::set_units(x1, value = u2, mode = "standard")
   units::drop_units(x2)
diff --git a/base/utils/R/unit_is_parseable.R b/base/utils/R/unit_is_parseable.R
index 17b3779537d..7ec67361258 100644
--- a/base/utils/R/unit_is_parseable.R
+++ b/base/utils/R/unit_is_parseable.R
@@ -1,17 +1,17 @@
-##' Check whether a string can be interpreted as a unit
-##'
-##' Function will replace the now-unmaintained `udunits2::ud.is.parseable`
-##' @author Tanishq Jain
-##'
-##' @param unit A character string representing a type of units
-##'
-##' @return TRUE if the units is parseable, FALSE otherwise.
-##'
-##' @examples
-##' unit_is_parseable("g/sec^2")
-##' unit_is_parseable("kiglometters")
-##'
-##' @export
+#' Check whether a string can be interpreted as a unit
+#'
+#' Function will replace the now-unmaintained `udunits2::ud.is.parseable`
+#' @author Tanishq Jain
+#'
+#' @param unit A character string representing a type of units
+#'
+#' @return TRUE if the unit is parseable, FALSE otherwise.
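+#'   For example, "g/sec^2" is parseable (TRUE), while the misspelled
+#'   "kiglometters" is not (FALSE).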
+#'
+#' @examples
+#' unit_is_parseable("g/sec^2")
+#' unit_is_parseable("kiglometters")
+#'
+#' @export
 unit_is_parseable <- function(unit){
   tryCatch({
     if(units::as_units(unit))
@@ -19,4 +19,4 @@ unit_is_parseable <- function(unit){
   },
     error = function(e)
       FALSE
   )
-} # unit_is_parseable
\ No newline at end of file
+} # unit_is_parseable
diff --git a/base/utils/R/utils.R b/base/utils/R/utils.R
index 5e155f2e294..1bb2174ad35 100644
--- a/base/utils/R/utils.R
+++ b/base/utils/R/utils.R
@@ -1,32 +1,23 @@
-#-------------------------------------------------------------------------------
-# Copyright (c) 2012 University of Illinois, NCSA.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the
-# University of Illinois/NCSA Open Source License
-# which accompanies this distribution, and is available at
-# http://opensource.ncsa.illinois.edu/license.html
-#-------------------------------------------------------------------------------
-
-#--------------------------------------------------------------------------------------------------#
-# Small, miscellaneous functions for use throughout PECAn
-#--------------------------------------------------------------------------------------------------#
-
-#--------------------------------------------------------------------------------------------------#
-##' return MstMIP variable as ncvar
-##'
-##' returns a MstMIP variable as a ncvar based on name and other parameters
-##' passed in.
-##'
-##' @title MstMIP variable
-##' @export
-##' @param name of variable
-##' @param lat latitude if dimension requests it
-##' @param lon longitude if dimension requests it
-##' @param time time if dimension requests it
-##' @param nsoil nsoil if dimension requests it
-##' @param silent logical: suppress log messages about missing variables?
-##' @return ncvar based on MstMIP definition
-##' @author Rob Kooper
+
+#--------------------------------------------------------------------------------------------------#
+# Small, miscellaneous functions for use throughout PEcAn
+#--------------------------------------------------------------------------------------------------#
+
+#--------------------------------------------------------------------------------------------------#
+#' return MsTMIP variable as ncvar
+#'
+#' returns an MsTMIP variable as an ncvar based on name and other parameters
+#' passed in.
+#'
+#' @export
+#' @param name of variable
+#' @param lat latitude if dimension requests it
+#' @param lon longitude if dimension requests it
+#' @param time time if dimension requests it
+#' @param nsoil nsoil if dimension requests it
+#' @param silent logical: suppress log messages about missing variables?
+#' @return ncvar based on MsTMIP definition
+#' @author Rob Kooper
 mstmipvar <- function(name, lat = NULL, lon = NULL, time = NULL, nsoil = NULL, silent = FALSE) {
   nc_var <- PEcAn.utils::standard_vars[PEcAn.utils::standard_vars$Variable.Name == name, ]
@@ -72,29 +63,31 @@ mstmipvar <- function(name, lat = NULL, lon = NULL, time = NULL, nsoil = NULL, s
 #--------------------------------------------------------------------------------------------------#
-##' left padded by zeros up to a given number of digits.
-##'
-##' returns a string representing a given number
-##' @title Left Pad Zeros
-##' @export
-##' @param num number to be padded (integer)
-##' @param digits number of digits to add
-##' @return num with zeros to the left
-##' @export
-##' @author Carl Davidson
+#' Left Pad Zeros
+#'
+#' Returns a string representing the given number, left-padded with
+#' zeros up to a given number of digits.
+#'
+#' @param num number to be padded (integer)
+#' @param digits minimum number of digits in the result
+#' @return num with zeros to the left
+#' @export
+#' @author Carl Davidson
 left.pad.zeros <- function(num, digits = 5) {
   format_string <- paste0("%", sprintf("0%.0f.0f", digits))
   return(sprintf(format_string, num))
 } # left.pad.zeros
-##' Truncates vector at 0
-##' @name zero.truncate
-##' @title Zero Truncate
-##' @param y numeric vector
-##' @return numeric vector with all values less than 0 set to 0
-##' @export
-##' @author unknown
+#' Zero Truncate
+#'
+#' Truncates vector at 0
+#'
+#' @param y numeric vector
+#' @return numeric vector with all values less than 0 set to 0
+#' @export
+#' @author unknown
 zero.truncate <- function(y) {
   y[y < 0 | is.na(y)] <- 0
   return(y)
@@ -102,18 +95,17 @@ zero.truncate <- function(y) {
 #--------------------------------------------------------------------------------------------------#
-##' R implementation of rsync
-##'
-##' rsync is a file copying tool in bash
-##' @title rsync
-##' @param args rsync arguments (see man rsync)
-##' @param from source
-##' @param to destination
-##' @param pattern file pattern to be matched
-##' @return nothing, transfers files as a side effect
-##' @export
-##' @author David LeBauer
-##' @author Shawn Serbin
+#' R implementation of rsync
+#'
+#' rsync is a command-line file copying tool
+#' @param args rsync arguments (see man rsync)
+#' @param from source
+#' @param to destination
+#' @param pattern file pattern to be matched
+#' @return nothing, transfers files as a side effect
+#' @export
+#' @author David LeBauer
+#' @author Shawn Serbin
 rsync <- function(args, from, to, pattern = "") {
   PEcAn.logger::logger.warn("NEED TO USE TUNNEL")
   system(paste0("rsync", " ", args, " ", from, pattern, " ", to), intern = TRUE)
@@ -121,13 +113,12 @@
 #--------------------------------------------------------------------------------------------------#
-##' R implementation of SSH
-##'
-##' @title SSH
-##' @param host (character) machine to connect to
-##' @param ... Commands to execute. Will be passed as a single quoted string
-##' @param args futher arguments
-##' @export
+#' R implementation of SSH
+#'
+#' @param host (character) machine to connect to
+#' @param ... Commands to execute. Will be passed as a single quoted string
+#' @param args further arguments
+#' @export
 ssh <- function(host, ..., args = "") {
   PEcAn.logger::logger.warn("NEED TO USE TUNNEL")
   if (host == "localhost") {
@@ -140,52 +131,50 @@ ssh <- function(host, ..., args = "") {
 #--------------------------------------------------------------------------------------------------#
-##' Convert vector to comma delimited string
-##'
-##' vecpaste, turns vector into comma delimited string fit for SQL statements.
-##' @title vecpaste
-##' @param x vector
-##' @return comma delimited string
-##' @export
+#' Convert vector to comma delimited string
+#'
+#' vecpaste turns a vector into a comma-delimited string fit for SQL statements.
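+#' For example, `vecpaste(1:3)` returns `"'1','2','3'"`, suitable for
+#' interpolation into an SQL `IN (...)` clause.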
+#' @param x vector
+#' @return comma delimited string
+#' @export
 vecpaste <- function(x) paste(paste0("'", x, "'"), collapse = ",")
 #--------------------------------------------------------------------------------------------------#
-##' returns an id representing a model run
-##'
-##' Provides a consistent method of naming runs; for use in model input files and indices
-##' @title Get Run ID
-##' @param run.type character, can be any character; currently 'SA' is used for sensitivity analysis, 'ENS' for ensemble run.
-##' @param index unique index for different runs, e.g. integer counting members of an
-##' ensemble or a quantile used to which a trait has been perturbed for sensitivity analysis
-##' @param trait name of trait being sampled (for sensitivity analysis)
-##' @param pft.name name of PFT (value from pfts.names field in database)
-##' @param site.id optional site id .This is could be necessary for multisite write=false ensembles.
-##' @return id representing a model run
-##' @export
-##' @examples
-##' get.run.id('ENS', left.pad.zeros(1, 5))
-##' get.run.id('SA', round(qnorm(-3),3), trait = 'Vcmax')
-##' @author Carl Davidson, David LeBauer
+#' returns an id representing a model run
+#'
+#' Provides a consistent method of naming runs; for use in model input files and indices
+#' @param run.type character, can be any character; currently 'SA' is used for sensitivity analysis, 'ENS' for ensemble run.
+#' @param index unique index for different runs, e.g. integer counting members of an
+#' ensemble or a quantile to which a trait has been perturbed for sensitivity analysis
+#' @param trait name of trait being sampled (for sensitivity analysis)
+#' @param pft.name name of PFT (value from pfts.names field in database)
+#' @param site.id optional site id. This could be necessary for multi-site, write=FALSE ensembles.
+#' @return id representing a model run
+#' @export
+#' @examples
+#' get.run.id('ENS', left.pad.zeros(1, 5))
+#' get.run.id('SA', round(qnorm(-3),3), trait = 'Vcmax')
+#' @author Carl Davidson, David LeBauer
 get.run.id <- function(run.type, index, trait = NULL, pft.name = NULL, site.id=NULL) {
   result <- paste(c(run.type, pft.name, trait, index, site.id), collapse = "-")
   return(result)
 } # get.run.id
 #--------------------------------------------------------------------------------------------------#
-##' Zero bounded density using log density transform
-##'
-##' Provides a zero bounded density estimate of a parameter.
-##' Kernel Density Estimation used by the \code{\link[stats]{density}} function will cause problems
-##' at the left hand end because it will put some weight on negative values.
-##' One useful approach is to transform to logs, estimate the density using KDE, and then transform back.
-##' @title Zero Bounded Density
-##' @param x data, as a numeric vector
-##' @param bw The smoothing bandwidth to be used. See 'bw.nrd'
-##' @param n number of points to use in kernel density estimate. See \code{\link[stats]{density}}
-##' @return data frame with back-transformed log density estimate
-##' @author \href{https://stats.stackexchange.com/q/6588/2750}{Rob Hyndman}
-##' @references M. P. Wand, J. S. Marron and D. Ruppert, 1991. Transformations in Density Estimation. Journal of the American Statistical Association. 86(414):343-353 \url{http://www.jstor.org/stable/2290569}
+#' Zero bounded density using log density transform
+#'
+#' Provides a zero bounded density estimate of a parameter.
+#' Kernel Density Estimation used by the \code{\link[stats]{density}} function will cause problems +#' at the left hand end because it will put some weight on negative values. +#' One useful approach is to transform to logs, estimate the density using KDE, and then transform back. +#' @param x data, as a numeric vector +#' @param bw The smoothing bandwidth to be used. See 'bw.nrd' +#' @param n number of points to use in kernel density estimate. See \code{\link[stats]{density}} +#' @return data frame with back-transformed log density estimate +#' @author \href{https://stats.stackexchange.com/q/6588/2750}{Rob Hyndman} +#' @references M. P. Wand, J. S. Marron and D. Ruppert, 1991. Transformations in Density Estimation. Journal of the American Statistical Association. 86(414):343-353 \url{http://www.jstor.org/stable/2290569} +#' @export zero.bounded.density <- function(x, bw = "SJ", n = 1001) { y <- log(x) g <- stats::density(y, bw = bw, n = n) @@ -197,16 +186,14 @@ zero.bounded.density <- function(x, bw = "SJ", n = 1001) { #--------------------------------------------------------------------------------------------------# -##' Summarize results of replicate observations in trait data query -##' -##' @title Summarize Results -##' @param result dataframe with results of trait data query -##' @return result with replicate observations summarized -##' @export summarize.result -##' @usage summarize.result(result) -##' @importFrom rlang .data -##' @importFrom magrittr %>% -##' @author David LeBauer, Alexey Shiklomanov +#' Summarize results of replicate observations in trait data query +#' +#' @param result dataframe with results of trait data query +#' @return result with replicate observations summarized +#' @export summarize.result +#' @importFrom rlang .data +#' @importFrom magrittr %>% +#' @author David LeBauer, Alexey Shiklomanov summarize.result <- function(result) { ans1 <- result %>% dplyr::filter(.data$n == 1) %>% @@ -233,13 +220,12 @@ summarize.result <- function(result) { #--------------------------------------------------------------------------------------------------# -##' Further summarizes output from summary.mcmc -##' -##' @title Get stats for parameters in MCMC output -##' @param mcmc.summary probably produced by \code{\link[coda]{summary.mcmc}} -##' @param sample.size passed as 'n' in returned list -##' @return list with summary statistics for parameters in an MCMC chain -##' @author David LeBauer +#' Further summarizes output from summary.mcmc +#' +#' @param mcmc.summary probably produced by \code{\link[coda]{summary.mcmc}} +#' @param sample.size passed as 'n' in returned list +#' @return list with summary statistics for parameters in an MCMC chain +#' @author David LeBauer get.stats.mcmc <- function(mcmc.summary, sample.size) { a <- list(n = sample.size) for (parm in c("beta.o", "sd.y", "sd.site", "sd.trt", "beta.ghs[2]")) { @@ -255,20 +241,21 @@ get.stats.mcmc <- function(mcmc.summary, sample.size) { #--------------------------------------------------------------------------------------------------# -##' A helper function for building a LaTex table. -##' -##' Used by \code{\link{get.parameter.stat}}. -##' @title Paste Stats -##' @name paste.stats -##' @param median 50-percent quantile -##' @param lcl lower confidence limit -##' @param ucl upper confidence limit -##' @param n significant digits for printing. 
-##'   Passed to \code{\link{tabnum}}
-##' @export
-##' @author David LeBauer
-##' @examples
-##' paste.stats(3.333333, 5.00001, 6.22222, n = 3)
-##' # [1] "$3.33(5,6.22)$"
+#' Paste Stats
+#'
+#' A helper function for building a LaTeX table.
+#'
+#' Used by \code{\link{get.parameter.stat}}.
+#' @name paste.stats
+#' @param median 50-percent quantile
+#' @param lcl lower confidence limit
+#' @param ucl upper confidence limit
+#' @param n significant digits for printing. Passed to \code{\link{tabnum}}
+#' @export
+#' @author David LeBauer
+#' @examples
+#' paste.stats(3.333333, 5.00001, 6.22222, n = 3)
+#' # [1] "$3.33(5,6.22)$"
 paste.stats <- function(median, lcl, ucl, n = 2) {
   paste0("$", tabnum(median, n),
          "(", tabnum(lcl, n), ",", tabnum(ucl, n), ")",
@@ -277,16 +264,17 @@
 #--------------------------------------------------------------------------------------------------#
-##' Gets statistics for LaTeX - formatted table
-##'
-##' @title Get Parameter Statistics
-##' @param mcmc.summary probably produced by \code{\link[coda]{summary.mcmc}}
-##' @param parameter name of parameter to extract, as character
-##' @return table with parameter statistics
-##' @author David LeBauer
-##' @export
-##' @examples
-##' \dontrun{get.parameter.stat(mcmc.summaries[[1]], 'beta.o')}
+#' Get Parameter Statistics
+#'
+#' Gets statistics for a LaTeX-formatted table
+#'
+#' @param mcmc.summary probably produced by \code{\link[coda]{summary.mcmc}}
+#' @param parameter name of parameter to extract, as character
+#' @return table with parameter statistics
+#' @author David LeBauer
+#' @export
+#' @examples
+#' \dontrun{get.parameter.stat(mcmc.summaries[[1]], 'beta.o')}
 get.parameter.stat <- function(mcmc.summary, parameter) {
   paste.stats(median = mcmc.summary$quantiles[parameter, "50%"],
               lcl = mcmc.summary$quantiles[parameter, c("2.5%")],
@@ -297,14 +285,15 @@
 #--------------------------------------------------------------------------------------------------#
-##' Calculate mean, variance statistics, and CI from a known distribution
-##'
-##' @title Probability Distirbution Function Statistics
-##' @param distn name of distribution used by R (beta, f, gamma, lnorm, norm, weibull)
-##' @param A first parameter
-##' @param B second parameter
-##' @return list with mean, variance, and 95 CI
-##' @author David LeBauer
+#' Probability Distribution Function Statistics
+#'
+#' Calculate mean, variance statistics, and CI from a known distribution
+#'
+#' @param distn name of distribution used by R (beta, f, gamma, lnorm, norm, weibull)
+#' @param A first parameter
+#' @param B second parameter
+#' @return list with mean, variance, and 95% CI
+#' @author David LeBauer
 ## in future, perhaps create S3 functions: get.stats.pdf <- pdf.stats
 pdf.stats <- function(distn, A, B) {
   distn <- as.character(distn)
@@ -338,23 +327,23 @@
 #--------------------------------------------------------------------------------------------------#
-##' Dictionary of terms used to identify traits in ed, filenames, and figures
-##'
-##' @return a dataframe with id, the name used by ED and PEcAn database for a parameter; fileid, an abbreviated
-##' name used for files; figid, the parameter name written out as best known in english for figures
-##' and tables.
-##'
-##' @param traits a vector of trait names, if traits = NULL, all of the traits will be returned.
-##' @export
-##' @examples
-##' # convert parameter name to a string appropriate for end-use plotting
-##' \dontrun{
-##' trait.lookup('growth_resp_factor')
-##' trait.lookup('growth_resp_factor')$figid
-##'
-##' # get a list of all traits and units in dictionary
-##' trait.lookup()[,c('figid', 'units')]
-##' }
+#' Dictionary of terms used to identify traits in ed, filenames, and figures
+#'
+#' @return a dataframe with id, the name used by ED and PEcAn database for a parameter; fileid, an abbreviated
+#' name used for files; figid, the parameter name written out as best known in English for figures
+#' and tables.
+#'
+#' @param traits a vector of trait names; if traits = NULL, all of the traits will be returned.
+#' @export
+#' @examples
+#' # convert parameter name to a string appropriate for end-use plotting
+#' \dontrun{
+#' trait.lookup('growth_resp_factor')
+#' trait.lookup('growth_resp_factor')$figid
+#'
+#' # get a list of all traits and units in dictionary
+#' trait.lookup()[,c('figid', 'units')]
+#' }
 trait.lookup <- function(traits = NULL) {
   if (is.null(traits)) {
     return(PEcAn.utils::trait.dictionary)
@@ -364,17 +353,18 @@
 #--------------------------------------------------------------------------------------------------#
-##' Convert number to n significant digits
-##'
-##' @title Table numbers
-##' @param x numeric value or vector
-##' @param n number of significant figures
-##' @export
-##' @author David LeBauer
-##' @return x rounded to n significant figures
-##' @examples
-##' tabnum(1.2345)
-##' tabnum(1.2345, n = 4)
+#' Table numbers
+#'
+#' Convert number to n significant digits
+#'
+#' @param x numeric value or vector
+#' @param n number of significant figures
+#' @export
+#' @author David LeBauer
+#' @return x rounded to n significant figures
+#' @examples
+#' tabnum(1.2345)
+#' tabnum(1.2345, n = 4)
 tabnum <- function(x, n = 3) {
   ans <- as.numeric(signif(x, n))
   names(ans) <- names(x)
@@ -384,15 +374,16 @@
 #--------------------------------------------------------------------------------------------------#
-##' Scale temperature dependent trait from measurement temperature to reference temperature
-##'
-##' @title Arrhenius scaling
-##' @param observed.value observed value of temperature dependent trait, e.g. Vcmax, root respiration rate
-##' @param old.temp temperature at which measurement was taken or previously scaled to
-##' @param new.temp temperature to be scaled to, default = 25 C
-##' @return numeric value at reference temperature
-##' @export
-##' @author unknown
+#' Arrhenius scaling
+#'
+#' Scale temperature dependent trait from measurement temperature to reference temperature
+#'
+#' @param observed.value observed value of temperature dependent trait, e.g.
+#'   Vcmax, root respiration rate
+#' @param old.temp temperature at which measurement was taken or previously scaled to
+#' @param new.temp temperature to be scaled to, default = 25 C
+#' @return numeric value at reference temperature
+#' @export
+#' @author unknown
 arrhenius.scaling <- function(observed.value, old.temp, new.temp = 25) {
   new.temp.K <- ud_convert(new.temp, "degC", "K")
   old.temp.K <- ud_convert(old.temp, "degC", "K")
@@ -402,32 +393,28 @@
 #--------------------------------------------------------------------------------------------------#
-##' Capitalize a string
-##'
-##' @title Capitalize a string
-##' @param x string
-##' @return x, capitalized
-##' @author David LeBauer
+#' Capitalize a string
+#'
+#' @param x string
+#' @return x, capitalized
+#' @author David LeBauer
 capitalize <- function(x) {
   x <- as.character(x)
   s <- strsplit(x, " ")[[1]]
   return(paste(toupper(substring(s, 1, 1)), substring(s, 2), sep = "", collapse = " "))
 } # capitalize
-# isFALSE <- function(x) !isTRUE(x)
-#--------------------------------------------------------------------------------------------------#
-
 #--------------------------------------------------------------------------------------------------#
-##' New xtable
-##'
-##' utility to properly escape the '%' sign for latex
-##' @title newxtable
-##' @param x data.frame to be converted to latex table
-##' @param environment can be 'table'; 'sidewaystable' if using latex rotating package
-##' @param table.placement,label,caption,caption.placement,align passed to \code{\link[xtable]{xtable}}
-##' @return Latex version of table, with percentages properly formatted
-##' @author David LeBauer
+#' New xtable
+#'
+#' utility to properly escape the '%' sign for LaTeX
+#'
+#' @param x data.frame to be converted to a LaTeX table
+#' @param environment can be 'table'; 'sidewaystable' if using the LaTeX rotating package
+#' @param table.placement,label,caption,caption.placement,align passed to \code{\link[xtable]{xtable}}
+#' @return LaTeX version of table, with percentages properly formatted
+#' @author David LeBauer
 newxtable <- function(x, environment = "table", table.placement = "ht", label = NULL,
                       caption = NULL, caption.placement = NULL, align = NULL) {
   need_packages("xtable")
@@ -442,15 +429,15 @@
 #--------------------------------------------------------------------------------------------------#
-##' Convert author, year, title to bibtex citation format
-##'
-##' Converts author year title to author1999abc format
-##' @title bibtexify
-##' @param author name of first author
-##' @param year year of publication
-##' @param title manuscript title
-##' @return bibtex citation
-##' @author unknown
+#' bibtexify
+#'
+#' Converts author year title to bibtex `author1999abc` format
+#'
+#' @param author name of first author
+#' @param year year of publication
+#' @param title manuscript title
+#' @return bibtex citation
+#' @author unknown
 bibtexify <- function(author, year, title) {
   acronym <- abbreviate(title, minlength = 3, strict = TRUE)
   return(paste0(author, year, acronym))
@@ -459,16 +446,16 @@
 #--------------------------------------------------------------------------------------------------#
-##' Convert categorical variable into sequential integers
-##'
-##' Turns any categorical variable into a sequential integer.
-##' This transformation is required for using data in BUGS/JAGS -##' @title as.sequence -##' @param x categorical variable as vector -##' @param na.rm logical: return NA's or replace with max(x) + 1 -##' @return sequence from 1:length(unique(x)) -##' @export -##' @author David LeBauer +#' Convert categorical variable into sequential integers +#' +#' Turns any categorical variable into a sequential integer. +#' This transformation is required for using data in BUGS/JAGS +#' +#' @param x categorical variable as vector +#' @param na.rm logical: return NA's or replace with max(x) + 1 +#' @return sequence from 1:length(unique(x)) +#' @export +#' @author David LeBauer as.sequence <- function(x, na.rm = TRUE) { x2 <- as.integer(factor(x, unique(x))) if (all(is.na(x2))) { @@ -483,16 +470,16 @@ as.sequence <- function(x, na.rm = TRUE) { #--------------------------------------------------------------------------------------------------# -##' Create a temporary settings file -##' -##' Uses \code{\link{tempfile}} function to provide a valid temporary file (OS independent) -##' Useful for testing functions that depend on settings file -##' Reference: http://stackoverflow.com/a/12940705/199217 -##' @title temp.settings -##' @param settings.txt character vector to be written -##' @return character vector written to and read from a temporary file -##' @export -##' @author David LeBauer +#' Create a temporary settings file +#' +#' Uses \code{\link{tempfile}} function to provide a valid temporary file (OS independent) +#' Useful for testing functions that depend on settings file +#' Reference: http://stackoverflow.com/a/12940705/199217 +#' +#' @param settings.txt character vector to be written +#' @return character vector written to and read from a temporary file +#' @export +#' @author David LeBauer temp.settings <- function(settings.txt) { temp <- tempfile() on.exit(unlink(temp), add = TRUE) @@ -504,19 +491,19 @@ temp.settings <- function(settings.txt) { #--------------------------------------------------------------------------------------------------# -##' Test if function gives an error -##' -##' adaptation of try that returns a logical value (FALSE if error) -##' @title tryl -##' @param FUN function to be evaluated for error -##' @return FALSE if function returns error; else TRUE -##' @export -##' @examples -##' tryl(1+1) -##' # TRUE -##' tryl(sum('a')) -##' # FALSE -##' @author David LeBauer +#' Test if function gives an error +#' +#' adaptation of try that returns a logical value (FALSE if error) +#' +#' @param FUN function to be evaluated for error +#' @return FALSE if function returns error; else TRUE +#' @export +#' @examples +#' tryl(1+1) +#' # TRUE +#' tryl(sum('a')) +#' # FALSE +#' @author David LeBauer tryl <- function(FUN) { out <- tryCatch(FUN, error = function(e) e) ans <- !inherits(out, "error") @@ -526,14 +513,14 @@ tryl <- function(FUN) { #--------------------------------------------------------------------------------------------------# -##' load model package -##' @title Load model package -##' @param model name of model -##' @return FALSE if function returns error; else TRUE -##' @export -##' @examples -##' \dontrun{require.modelpkg(BioCro)} -##' @author David LeBauer +#' Load model package +#' +#' @param model name of model +#' @return FALSE if function returns error; else TRUE +#' @export +#' @examples +#' \dontrun{require.modelpkg(BioCro)} +#' @author David LeBauer load.modelpkg <- function(model) { pecan.modelpkg <- paste0("PEcAn.", model) if (!pecan.modelpkg %in% 
      names(utils::sessionInfo()$otherPkgs)) {
@@ -549,14 +536,14 @@
 #--------------------------------------------------------------------------------------------------#
-##' conversion function for the unit conversions that udunits cannot handle but often needed in PEcAn calculations
-##' @title misc.convert
-##' @export
-##' @param x convertible values
-##' @param u1 unit to be converted from, character
-##' @param u2 unit to be converted to, character
-##' @return val converted values
-##' @author Istem Fer, Shawn Serbin
+#' Conversion function for unit conversions that udunits cannot handle but
+#' that are often needed in PEcAn calculations
+#'
+#' @export
+#' @param x convertible values
+#' @param u1 unit to be converted from, character
+#' @param u2 unit to be converted to, character
+#' @return val converted values
+#' @author Istem Fer, Shawn Serbin
 misc.convert <- function(x, u1, u2) {
   amC <- 12.0107  # atomic mass of carbon
@@ -588,13 +575,13 @@
 #--------------------------------------------------------------------------------------------------#
-##' function to check whether units are convertible by misc.convert function
-##' @title misc.are.convertible
-##' @export
-##' @param u1 unit to be converted from, character
-##' @param u2 unit to be converted to, character
-##' @return logical
-##' @author Istem Fer, Shawn Serbin
+#' Check whether units are convertible by the misc.convert function
+#'
+#' @export
+#' @param u1 unit to be converted from, character
+#' @param u2 unit to be converted to, character
+#' @return logical
+#' @author Istem Fer, Shawn Serbin
 misc.are.convertible <- function(u1, u2) {
   # make sure the order of vectors match
@@ -619,12 +606,12 @@
 #--------------------------------------------------------------------------------------------------#
-##' Convert expression to variable names
-##' @title convert.expr
-##' @param expression expression string
-##' @return list
-##' @export
-##' @author Istem Fer
+#' Convert expression to variable names
+#'
+#' @param expression expression string
+#' @return list
+#' @export
+#' @author Istem Fer
 convert.expr <- function(expression) {
   # split equation to LHS and RHS
   deri.var <- gsub("=.*$", "", expression)  # name of the derived variable
@@ -646,30 +633,31 @@
 #--------------------------------------------------------------------------------------------------#
-##' Simple function to use ncftpget for FTP downloads behind a firewall.
-##' Requires ncftpget and a properly formatted config file in the users
-##' home directory
-##' @title download_file
-##' @param url complete URL for file download
-##' @param filename destination file name
-##' @param method Method of file retrieval. Can set this using the `options(download.ftp.method=[method])` in your Rprofile.
-##'        example options(download.ftp.method="ncftpget")
-##'
-##' @examples
-##' \dontrun{
-##' download_file("http://lib.stat.cmu.edu/datasets/csb/ch11b.txt","~/test.download.txt")
-##'
-##' download_file("
-##' ftp://ftp.cdc.noaa.gov/Datasets/NARR/monolevel/pres.sfc.2000.nc",
-##' "~/pres.sfc.2000.nc")
-##' }
-##'
-##' @export
-##'
-##' @author Shawn Serbin, Rob Kooper
+#' Simple function to use ncftpget for FTP downloads behind a firewall.
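+#'
+#' Note that the `ncftpget` method is only consulted for `ftp://` URLs
+#' (the function checks `startsWith(url, "ftp://")` before using it).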
+#'
+#' Requires ncftpget and a properly formatted config file in the user's
+#' home directory
+#'
+#' @param url complete URL for file download
+#' @param filename destination file name
+#' @param method Method of file retrieval. Set this via
+#'   `options(download.ftp.method = ...)` in your Rprofile,
+#'   e.g. `options(download.ftp.method = "ncftpget")`.
+#'
+#' @examples
+#' \dontrun{
+#' download_file("http://lib.stat.cmu.edu/datasets/csb/ch11b.txt","~/test.download.txt")
+#'
+#' download_file("
+#' ftp://ftp.cdc.noaa.gov/Datasets/NARR/monolevel/pres.sfc.2000.nc",
+#' "~/pres.sfc.2000.nc")
+#' }
+#'
+#' @export
+#'
+#' @author Shawn Serbin, Rob Kooper
 download_file <- function(url, filename, method) {
   if (startsWith(url, "ftp://")) {
-    method <- if (missing(method)) getOption("download.ftp.method", default = "auto")
+    if (missing(method)) method <- getOption("download.ftp.method", default = "auto")
     if (method == "ncftpget") {
       PEcAn.logger::logger.debug(paste0("FTP Method: ",method))
       #system2("ncftpget", c("-c", "url", ">", filename))
@@ -685,34 +673,30 @@
 #--------------------------------------------------------------------------------------------------#
-##' Retry function X times before stopping in error
-##'
-##' @title retry.func
-##' @name retry.func
-##' @description Retry function X times before stopping in error
-##'
-##' @param expr The function to try running
-##' @param maxErrors The number of times to retry the function
-##' @param sleep How long to wait before retrying the function call
-##' @param isError function to use for checking whether to try again.
-##'   Must take one argument that contains the result of evaluating `expr`
-##'   and return TRUE if another retry is needed
-##'
-##' @return retval returns the results of the function call
-##'
-##' @examples
-##' \dontrun{
-##' file_url <- paste0("https://thredds.daac.ornl.gov/",
-##'                    "thredds/dodsC/ornldaac/1220",
-##'                    "/mstmip_driver_global_hd_climate_lwdown_1999_v1.nc4")
-##' dap <- retry.func(
-##'   ncdf4::nc_open(file_url)
-##'   maxErrors=10,
-##'   sleep=2)
-##' }
-##'
-##' @export
-##' @author Shawn Serbin
+#' Retry function X times before stopping in error
+#'
+#' @param expr The function to try running
+#' @param maxErrors The number of times to retry the function
+#' @param sleep How long to wait before retrying the function call
+#' @param isError function to use for checking whether to try again.
+#'   Must take one argument that contains the result of evaluating `expr`
+#'   and return TRUE if another retry is needed
+#'
+#' @return retval returns the results of the function call
+#'
+#' @examples
+#' \dontrun{
+#' file_url <- paste0("https://thredds.daac.ornl.gov/",
+#'                    "thredds/dodsC/ornldaac/1220",
+#'                    "/mstmip_driver_global_hd_climate_lwdown_1999_v1.nc4")
+#' dap <- retry.func(
+#'   ncdf4::nc_open(file_url),
+#'   maxErrors=10,
+#'   sleep=2)
+#' }
+#'
+#' @export
+#' @author Shawn Serbin
 retry.func <- function(expr, isError = function(x) inherits(x, "try-error"), maxErrors = 5, sleep = 0) {
   attempts = 0
   retval = try(eval(expr))
@@ -737,25 +721,25 @@
 #--------------------------------------------------------------------------------------------------#
-##' Adverb to try calling a function `n` times before giving up
-##'
-##' @param .f Function to call.
-##' @param n Number of attempts to try -##' @param timeout Timeout between attempts, in seconds -##' @param silent Silence error messages? -##' @return Modified version of input function -##' @examples -##' rlog <- robustly(log, timeout = 0.3) -##' try(rlog("fail")) -##' \dontrun{ -##' nc_openr <- robustly(ncdf4::nc_open, n = 10, timeout = 0.5) -##' nc <- nc_openr(url) -##' # ...or just call the function directly -##' nc <- robustly(ncdf4::nc_open, n = 20)(url) -##' # Useful in `purrr` maps -##' many_vars <- purrr::map(varnames, robustly(ncdf4::ncvar_get), nc = nc) -##' } -##' @export +#' Adverb to try calling a function `n` times before giving up +#' +#' @param .f Function to call. +#' @param n Number of attempts to try +#' @param timeout Timeout between attempts, in seconds +#' @param silent Silence error messages? +#' @return Modified version of input function +#' @examples +#' rlog <- robustly(log, timeout = 0.3) +#' try(rlog("fail")) +#' \dontrun{ +#' nc_openr <- robustly(ncdf4::nc_open, n = 10, timeout = 0.5) +#' nc <- nc_openr(url) +#' # ...or just call the function directly +#' nc <- robustly(ncdf4::nc_open, n = 20)(url) +#' # Useful in `purrr` maps +#' many_vars <- purrr::map(varnames, robustly(ncdf4::ncvar_get), nc = nc) +#' } +#' @export robustly <- function(.f, n = 10, timeout = 0.2, silent = TRUE) { .f <- purrr::as_mapper(.f) function(...) { @@ -770,8 +754,3 @@ robustly <- function(.f, n = 10, timeout = 0.2, silent = TRUE) { } } #--------------------------------------------------------------------------------------------------# - - -#################################################################################################### -### EOF. End of R script file. -#################################################################################################### diff --git a/base/utils/R/version.R b/base/utils/R/version.R new file mode 100644 index 00000000000..0e58d885272 --- /dev/null +++ b/base/utils/R/version.R @@ -0,0 +1,3 @@ +# Set at package install time, used by pecan.all::pecan_version() +# to identify development versions of packages +.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown") diff --git a/base/utils/R/write.config.utils.R b/base/utils/R/write.config.utils.R index ce30d82adb0..06c2bb4a379 100644 --- a/base/utils/R/write.config.utils.R +++ b/base/utils/R/write.config.utils.R @@ -1,24 +1,16 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- #--------------------------------------------------------------------------------------------------# ### TODO: Generalize this code for all ecosystem models (e.g. ED2.2, SiPNET, etc). 
#--------------------------------------------------------------------------------------------------#
-##' Get Quantiles
-##'
-##' Returns a vector of quantiles specified by a given `<quantiles>` xml tag
-##'
-##' @param quantiles.tag specifies tag used to specify quantiles
-##' @return vector of quantiles
-##' @export
-##' @author David LeBauer
+#' Get Quantiles
+#'
+#' Returns a vector of quantiles specified by a given `<quantiles>` xml tag
+#'
+#' @param quantiles.tag specifies tag used to specify quantiles
+#' @return vector of quantiles
+#' @export
+#' @author David LeBauer
 get.quantiles <- function(quantiles.tag) {
   quantiles <- vector()
   if (!is.null(quantiles.tag$quantile)) {
@@ -38,14 +30,14 @@
 } # get.quantiles
-##' get sensitivity samples as a list
-##'
-##' @param pft Plant Functional Type
-##' @param env
-##' @param quantiles quantiles at which to obtain samples from parameter for
-##'   sensitivity analysis
-##' @export
-##' @return sa.sample.list
+#' get sensitivity samples as a list
+#'
+#' @param pft list of samples from Plant Functional Types
+#' @param env list of samples from environment parameters
+#' @param quantiles quantiles at which to obtain samples from parameter for
+#'   sensitivity analysis
+#' @export
+#' @return sa.sample.list
 get.sa.sample.list <- function(pft, env, quantiles) {
   sa.sample.list <- list()
   for (i in seq_along(pft)) {
@@ -57,23 +49,23 @@
 } # get.sa.sample.list
-##' Get sensitivity analysis samples
-##'
-##' Samples parameters for a model run at specified quantiles.
-##'
-##' Samples from long (>2000) vectors that represent random samples from a
-##' trait distribution.
-##' Samples are either the MCMC chains output from the Bayesian meta-analysis
-##' or are randomly sampled from the closed-form distribution of the
-##' parameter probability distribution function.
-##' The list is indexed first by trait, then by quantile.
-##'
-##' @param samples random samples from trait distribution
-##' @param quantiles list of quantiles to at which to sample,
-##'   set in settings file
-##' @return a list of lists representing quantile values of trait distributions
-##' @export
-##' @author David LeBauer
+#' Get sensitivity analysis samples
+#'
+#' Samples parameters for a model run at specified quantiles.
+#'
+#' Samples from long (>2000) vectors that represent random samples from a
+#' trait distribution.
+#' Samples are either the MCMC chains output from the Bayesian meta-analysis
+#' or are randomly sampled from the closed-form distribution of the
+#' parameter probability distribution function.
+#' The list is indexed first by trait, then by quantile.
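+#' For example, with `quantiles = c(0.159, 0.5, 0.841)` each trait is
+#' sampled at its median and at quantiles corresponding to roughly
+#' plus or minus one standard deviation.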
+#'
+#' @param samples random samples from trait distribution
+#' @param quantiles list of quantiles at which to sample,
+#'   set in settings file
+#' @return a list of lists representing quantile values of trait distributions
+#' @export
+#' @author David LeBauer
 get.sa.samples <- function(samples, quantiles) {
   sa.samples <- data.frame()
   for (trait in names(samples)) {
@@ -86,12 +78,12 @@
 } # get.sa.samples
-##' checks that met2model function exists
-##'
-##' Checks if `met2model.<model>` exists for a particular model
-##'
-##' @param model model package name
-##' @return logical
+#' checks that met2model function exists
+#'
+#' Checks if `met2model.<model>` exists for a particular model
+#'
+#' @param model model package name
+#' @return logical
 met2model.exists <- function(model) {
   load.modelpkg(model)
   return(exists(paste0("met2model.", model)))
diff --git a/base/utils/data/standard_vars.csv b/base/utils/data/standard_vars.csv
index cd13211aa23..7453a3db34d 100755
--- a/base/utils/data/standard_vars.csv
+++ b/base/utils/data/standard_vars.csv
@@ -60,6 +60,8 @@ slow_soil_pool_carbon_content,slow_soil_pool_carbon_content_of_soil_layer,kg C m
 fast_soil_pool_carbon_content,fast_soil_pool_carbon_content_of_soil_layer,kg C m-2,Fast Soil Pool Carbon Content by Layer,Carbon Pools,real,lon,lat,time,depth,Fast soil pool carbon content of soil layer,
 structural_soil_pool_carbon_content,structural_soil_pool_carbon_content_of_soil_layer,kg C m-2,Structural Soil Pool Carbon Content by Layer,Carbon Pools,real,lon,lat,time,depth,Structural soil pool carbon content of soil layer,
 soil_nitrogen_content,soil_nitrogen_content_of_soil_layer,kg N m-2,Soil Nitrogen Content by Layer,Nitrogen Pools,real,lon,lat,time,depth,Total nitrogen content of soil layer,
+soil_inorganic_nitrogen_content,soil_inorganic_nitrogen_content_of_soil_layer,kg N m-2,Soil Inorganic Nitrogen Content by Layer,Nitrogen Pools,real,lon,lat,time,depth,Total inorganic nitrogen content of soil layer (mineralized nitrogen),
+soil_organic_nitrogen_content,soil_organic_nitrogen_content_of_soil_layer,kg N m-2,Soil Organic Nitrogen Content by Layer,Nitrogen Pools,real,lon,lat,time,depth,"Total organic nitrogen content of soil layer, excluding litter",
 soil_phosphorus_content,soil_phosphorus_content_of_soil_layer,kg P m-2,Soil Phosphorus Content by Layer,Phosphorus Pools,real,lon,lat,time,depth,Total phosphorus content of soil layer,
 Qh,NA,W m-2,Sensible heat,Energy Fluxes,real,lon,lat,time,NA,Sensible heat flux into the boundary layer (positive into atmosphere),
 Qle,NA,W m-2,Latent heat,Energy Fluxes,real,lon,lat,time,NA,Latent heat flux into the boundary layer (positive into atmosphere),
diff --git a/base/utils/inst/clear.scratch.sh b/base/utils/inst/clear.scratch.sh
index cbc75f2b292..815566596d6 100755
--- a/base/utils/inst/clear.scratch.sh
+++ b/base/utils/inst/clear.scratch.sh
@@ -1,13 +1,5 @@
 #!/bin/bash
-#-------------------------------------------------------------------------------
-# Copyright (c) 2012 University of Illinois, NCSA.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the
-# University of Illinois/NCSA Open Source License
-# which accompanies this distribution, and is available at
-# http://opensource.ncsa.illinois.edu/license.html
-#-------------------------------------------------------------------------------
-#
+
 #--------------------------------------------------------------------------------------------------#
 LOC=/scratch/$USER
 if [ -d "$LOC" ]; then
diff --git a/base/utils/man/PEcAn.Rd b/base/utils/man/PEcAn.Rd
index 3f5a6979f91..4a6752d93c7 100644
--- a/base/utils/man/PEcAn.Rd
+++ b/base/utils/man/PEcAn.Rd
@@ -2,33 +2,73 @@
 % Please edit documentation in R/help.R
 \docType{package}
 \name{PEcAn}
+\alias{PEcAn.utils}
+\alias{PEcAn.utils-package}
 \alias{PEcAn}
 \alias{PECAn}
 \alias{pecan}
 \alias{package-pecan}
 \title{R package to support PEcAn, the Predictive Ecosystem Analyzer}
 \description{
-Instructions for the use of this package are provided in the project documentation \url{https://pecan.gitbooks.io/pecan-documentation/content/}.
+Instructions for the use of this package are provided in the project
+documentation \url{https://pecanproject.github.io/documentation.html}.
 }
 \details{
 Project homepage: \url{pecanproject.org}
 
 Description of PEcAn
 
-The Predictive Ecosystem Analyzer (PEcAn) is a scientific workflow management tool that is designed to simplify the management of model parameterization, execution, and analysis. The goal of PEcAn is to streamline the interaction between data and models, and to improve the efficacy of scientific investigation. PEcAn is an open source utility that encapsulates:
+The Predictive Ecosystem Analyzer (PEcAn) is a scientific workflow management
+tool that is designed to simplify the management of model parameterization,
+execution, and analysis. The goal of PEcAn is to streamline the interaction
+between data and models, and to improve the efficacy of scientific
+investigation. PEcAn is an open source utility that encapsulates:
 \enumerate{
 \item acquisition of meteorological inputs
-\item synthesis of physiological trait data as the posterior distribution of a Bayesian meta-analysis
-\item sampling trait meta-analysis posterior distributions to parameterize ensembles of ED2 and other ecophysiological models
+\item synthesis of physiological trait data as the posterior distribution of a
+Bayesian meta-analysis
+\item sampling trait meta-analysis posterior distributions to parameterize
+ensembles of ED2 and other ecophysiological models
 \item probabilistic forecasts
-\item postprocessing to constrain forecasts and model parameters with field, meterological, eddy flux, and spectral data, and
+\item postprocessing to constrain forecasts and model parameters with field,
+meteorological, eddy flux, and spectral data, and
 \item provenance tracking
 }
 
-PECAn integrates available data into ecological forecasts by running ensembles of a terrestrial ecosystem model that is parameterized by the posterior distribution from a meta-analysis of available plant trait data.
-These trait data are assembled from field research and primary literature, and are stored in a PostgreSQL database. Current development focused on biofuel crops uses BETYdb.
-In addition to generating forecasts that reflect available data, PEcAn quantifies the contribution of each parameter to model uncertainty.
-This information informs targeted data collection and synthesis efforts that most efficiently reduce forecast uncertainty.
+PEcAn integrates available data into ecological forecasts by running
+ensembles of a terrestrial ecosystem model that is parameterized by the
+posterior distribution from a meta-analysis of available plant trait data.
+These trait data are assembled from field research and primary literature,
+and are stored in a PostgreSQL database. Current development, focused on
+biofuel crops, uses BETYdb. In addition to generating forecasts that reflect
+available data, PEcAn quantifies the contribution of each parameter to model
+uncertainty. This information informs targeted data collection and synthesis
+efforts that most efficiently reduce forecast uncertainty.
+
+Current development is focused on developing PEcAn into a real-time data
+assimilation and forecasting system. This system will provide a detailed
+analysis of the past and present ecosystem functioning that seamlessly
+transitions into forecasts.
+}
+\author{
+\strong{Maintainer}: Rob Kooper \email{kooper@illinois.edu}
+
+Authors:
+\itemize{
+  \item Mike Dietze \email{dietze@bu.edu}
+  \item David LeBauer \email{dlebauer@email.arizona.edu}
+  \item Xiaohui Feng \email{feng22@illinois.edu}
+  \item Dan Wang
+  \item Carl Davidson \email{davids14@illinois.edu}
+  \item Shawn Serbin \email{sserbin@bnl.gov}
+  \item Shashank Singh \email{shashanksingh819@gmail.com}
+  \item Chris Black \email{chris@ckblack.org}
+  \item Tanishq Jain \email{tanishqjain010@gmail.com}
+}
+
+Other contributors:
+\itemize{
+  \item University of Illinois, NCSA [copyright holder]
+}
 
-Current development is focused on developing PEcAn into a real-time data assimilation and forecasting system. This system will provide a detailed analysis of the past and present ecosystem functioning that seamlessly transitions into forecasts.
 }
diff --git a/base/utils/man/as.sequence.Rd b/base/utils/man/as.sequence.Rd
index 43327f14e38..d08c43347fa 100644
--- a/base/utils/man/as.sequence.Rd
+++ b/base/utils/man/as.sequence.Rd
@@ -2,7 +2,7 @@
 % Please edit documentation in R/utils.R
 \name{as.sequence}
 \alias{as.sequence}
-\title{as.sequence}
+\title{Convert categorical variable into sequential integers}
 \usage{
 as.sequence(x, na.rm = TRUE)
 }
@@ -15,9 +15,6 @@ as.sequence(x, na.rm = TRUE)
 sequence from 1:length(unique(x))
 }
 \description{
-Convert categorical variable into sequential integers
-}
-\details{
-Turns any categorical variable into a sequential integer.
This transformation is required for using data in BUGS/JAGS } diff --git a/base/utils/man/bibtexify.Rd b/base/utils/man/bibtexify.Rd index db981ef1ecc..71c2b0ce360 100644 --- a/base/utils/man/bibtexify.Rd +++ b/base/utils/man/bibtexify.Rd @@ -17,10 +17,7 @@ bibtexify(author, year, title) bibtex citation } \description{ -Convert author, year, title to bibtex citation format -} -\details{ -Converts author year title to author1999abc format +Converts author year title to bibtex \code{author1999abc} format } \author{ unknown diff --git a/base/utils/man/bugs.rdist.Rd b/base/utils/man/bugs.rdist.Rd index 6443e81cc52..13518777c45 100644 --- a/base/utils/man/bugs.rdist.Rd +++ b/base/utils/man/bugs.rdist.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/r2bugs.distributions.R \name{bugs.rdist} \alias{bugs.rdist} -\title{bugs.rdist} +\title{Sample from an R distribution using JAGS} \usage{ bugs.rdist( prior = data.frame(distn = "norm", parama = 0, paramb = 1), @@ -21,9 +21,6 @@ bugs.rdist( vector of samples } \description{ -Sample from an R distribution using JAGS -} -\details{ Takes a distribution with R parameterization, converts it to a BUGS parameterization, and then samples from the distribution using JAGS diff --git a/base/utils/man/clear.scratch.Rd b/base/utils/man/clear.scratch.Rd index 3a8cc6cccc6..1aaeb969887 100644 --- a/base/utils/man/clear.scratch.Rd +++ b/base/utils/man/clear.scratch.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/clear.scratch.R \name{clear.scratch} \alias{clear.scratch} -\title{Clear EBI-CLUSTER worker node local scratch directories of old PEcAn output} +\title{Removes previous model run output from worker node local scratch directories on EBI-CLUSTER} \usage{ clear.scratch(settings) } diff --git a/base/utils/man/convert.expr.Rd b/base/utils/man/convert.expr.Rd index 21cc7a95b78..605a17773c4 100644 --- a/base/utils/man/convert.expr.Rd +++ b/base/utils/man/convert.expr.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/utils.R \name{convert.expr} \alias{convert.expr} -\title{convert.expr} +\title{Convert expression to variable names} \usage{ convert.expr(expression) } diff --git a/base/utils/man/distn.table.stats.Rd b/base/utils/man/distn.table.stats.Rd index 34526bd1448..bd77cb13bf6 100644 --- a/base/utils/man/distn.table.stats.Rd +++ b/base/utils/man/distn.table.stats.Rd @@ -2,8 +2,7 @@ % Please edit documentation in R/distn.stats.R \name{distn.table.stats} \alias{distn.table.stats} -\title{return mean and standard deviation of a distribution for each distribution in a table with \code{colnames = c('distn', 'a', 'b')}, -e.g. in a table of priors} +\title{Helper function for computing summary statistics of a parametric distribution} \usage{ distn.table.stats(distns) } @@ -14,7 +13,8 @@ distn.table.stats(distns) named vector of mean and SD } \description{ -a helper function for computing summary statistics of a parametric distribution +return mean and standard deviation of a distribution for each distribution in a table with \code{colnames = c('distn', 'a', 'b')}, +e.g. 
in a table of priors
}
\author{
David LeBauer
}
diff --git a/base/utils/man/download.url.Rd b/base/utils/man/download.url.Rd
index 476ab869a24..9187d9f72c5 100644
--- a/base/utils/man/download.url.Rd
+++ b/base/utils/man/download.url.Rd
@@ -2,7 +2,7 @@
 % Please edit documentation in R/download.url.R
 \name{download.url}
 \alias{download.url}
-\title{Download file from the url.}
+\title{Try to download a file.}
 \usage{
 download.url(url, file, timeout = 600, .opts = list(), retry = TRUE)
 }
@@ -22,9 +22,6 @@ protected site use list(userpwd=userpass, httpauth = 1L)}
 returns name of file if successful or NA if not.
 }
 \description{
-Try and download a file.
-}
-\details{
 This will download a file, if retry is set and 404 is returned it will
 wait until the file is available. If the file is still not available
 after timeout tries, it will return NA. If the file is downloaded
diff --git a/base/utils/man/download_file.Rd b/base/utils/man/download_file.Rd
index d2f3bb302c9..97c660c8a81 100644
--- a/base/utils/man/download_file.Rd
+++ b/base/utils/man/download_file.Rd
@@ -2,7 +2,7 @@
 % Please edit documentation in R/utils.R
 \name{download_file}
 \alias{download_file}
-\title{download_file}
+\title{Simple function to use ncftpget for FTP downloads behind a firewall.}
 \usage{
 download_file(url, filename, method)
 }
@@ -15,7 +15,6 @@ download_file(url, filename, method)
 example options(download.ftp.method="ncftpget")}
 }
 \description{
-Simple function to use ncftpget for FTP downloads behind a firewall.
 Requires ncftpget and a properly formatted config file in the users
 home directory
 }
diff --git a/base/utils/man/full.path.Rd b/base/utils/man/full.path.Rd
index 413c0f16435..5fe7d1bf162 100644
--- a/base/utils/man/full.path.Rd
+++ b/base/utils/man/full.path.Rd
@@ -2,7 +2,7 @@
 % Please edit documentation in R/full.path.R
 \name{full.path}
 \alias{full.path}
-\title{Creates an absolute path to a folder}
+\title{Creates an absolute path to a folder.}
 \usage{
 full.path(folder)
 }
@@ -13,9 +13,6 @@ full.path(folder)
 absolute path
 }
 \description{
-Creates an absolute path to a folder.
-}
-\details{
 This will take a folder and make it into an absolute folder name. It
 will normalize the path and prepend it with the current working folder
 if needed to get an absolute path name.
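A short sketch of the contract full.path() documents above (normalize the path, prepending the current working directory when the input is relative). This is an illustration only, assuming base R and POSIX-style paths; full_path_sketch is a hypothetical name, not the PEcAn.utils implementation.

full_path_sketch <- function(folder) {
  # Relative input: anchor it at the current working directory
  if (!grepl("^[/~]", folder)) {
    folder <- file.path(getwd(), folder)
  }
  # Resolve "." and ".." and redundant separators; mustWork = FALSE
  # tolerates folders that do not exist yet
  normalizePath(folder, winslash = "/", mustWork = FALSE)
}

# Example: with getwd() == "/home/carya", full_path_sketch("out/run1")
# returns "/home/carya/out/run1"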
diff --git a/base/utils/man/get.run.id.Rd b/base/utils/man/get.run.id.Rd
index 0bbe9eac020..675d762416a 100644
--- a/base/utils/man/get.run.id.Rd
+++ b/base/utils/man/get.run.id.Rd
@@ -2,7 +2,7 @@
 % Please edit documentation in R/utils.R
 \name{get.run.id}
 \alias{get.run.id}
-\title{Get Run ID}
+\title{Returns an id representing a model run}
 \usage{
 get.run.id(run.type, index, trait = NULL, pft.name = NULL, site.id = NULL)
 }
@@ -22,9 +22,6 @@ ensemble or a quantile used to which a trait has been perturbed for sensitivity
 id representing a model run
 }
 \description{
-returns an id representing a model run
-}
-\details{
 Provides a consistent method of naming runs; for use in model input files and indices
 }
 \examples{
diff --git a/base/utils/man/get.sa.sample.list.Rd b/base/utils/man/get.sa.sample.list.Rd
index 7cb0dce163d..a1297329602 100644
--- a/base/utils/man/get.sa.sample.list.Rd
+++ b/base/utils/man/get.sa.sample.list.Rd
@@ -7,9 +7,9 @@
 get.sa.sample.list(pft, env, quantiles)
 }
 \arguments{
-\item{pft}{Plant Functional Type}
+\item{pft}{list of samples from Plant Functional Types}

-\item{env}{}
+\item{env}{list of samples from environment parameters}

 \item{quantiles}{quantiles at which to obtain samples from parameter for
 sensitivity analysis}
diff --git a/base/utils/man/get.stats.mcmc.Rd b/base/utils/man/get.stats.mcmc.Rd
index 00f6af0a576..f0b71ef60ca 100644
--- a/base/utils/man/get.stats.mcmc.Rd
+++ b/base/utils/man/get.stats.mcmc.Rd
@@ -2,7 +2,7 @@
 % Please edit documentation in R/utils.R
 \name{get.stats.mcmc}
 \alias{get.stats.mcmc}
-\title{Get stats for parameters in MCMC output}
+\title{Further summarizes output from summary.mcmc}
 \usage{
 get.stats.mcmc(mcmc.summary, sample.size)
 }
diff --git a/base/utils/man/load.modelpkg.Rd b/base/utils/man/load.modelpkg.Rd
index 95792b3cf6f..06d4aa6cbf7 100644
--- a/base/utils/man/load.modelpkg.Rd
+++ b/base/utils/man/load.modelpkg.Rd
@@ -13,7 +13,7 @@ load.modelpkg(model)
 FALSE if function returns error; else TRUE
 }
 \description{
-load model package
+Load model package
 }
 \examples{
 \dontrun{require.modelpkg(BioCro)}
diff --git a/base/utils/man/misc.are.convertible.Rd b/base/utils/man/misc.are.convertible.Rd
index eebddb8fe66..6364fb8e8c2 100644
--- a/base/utils/man/misc.are.convertible.Rd
+++ b/base/utils/man/misc.are.convertible.Rd
@@ -2,7 +2,7 @@
 % Please edit documentation in R/utils.R
 \name{misc.are.convertible}
 \alias{misc.are.convertible}
-\title{misc.are.convertible}
+\title{Check whether units are convertible by the misc.convert function}
 \usage{
 misc.are.convertible(u1, u2)
 }
diff --git a/base/utils/man/misc.convert.Rd b/base/utils/man/misc.convert.Rd
index 24b9d1e2a74..116bb896f55 100644
--- a/base/utils/man/misc.convert.Rd
+++ b/base/utils/man/misc.convert.Rd
@@ -2,7 +2,7 @@
 % Please edit documentation in R/utils.R
 \name{misc.convert}
 \alias{misc.convert}
-\title{misc.convert}
+\title{Convert units that udunits cannot handle but that are often needed in PEcAn calculations}
 \usage{
 misc.convert(x, u1, u2)
 }
diff --git a/base/utils/man/mstmipvar.Rd b/base/utils/man/mstmipvar.Rd
index 111a79d3ee6..592cc971eae 100644
--- a/base/utils/man/mstmipvar.Rd
+++ b/base/utils/man/mstmipvar.Rd
@@ -2,7 +2,7 @@
 % Please edit documentation in R/utils.R
 \name{mstmipvar}
 \alias{mstmipvar}
-\title{MstMIP variable}
+\title{Return MstMIP variable as ncvar}
 \usage{
 mstmipvar(
   name,
@@ -30,9 +30,6 @@ mstmipvar(
 ncvar based on MstMIP definition
 }
 \description{
-return MstMIP variable as ncvar
-}
-\details{
 returns a MstMIP
variable as a ncvar based on name and other parameters passed in. } diff --git a/base/utils/man/newxtable.Rd b/base/utils/man/newxtable.Rd index 1d480debd1e..4f679494eb9 100644 --- a/base/utils/man/newxtable.Rd +++ b/base/utils/man/newxtable.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/utils.R \name{newxtable} \alias{newxtable} -\title{newxtable} +\title{New xtable} \usage{ newxtable( x, @@ -25,9 +25,6 @@ newxtable( Latex version of table, with percentages properly formatted } \description{ -New xtable -} -\details{ utility to properly escape the '\%' sign for latex } \author{ diff --git a/base/utils/man/pdf.stats.Rd b/base/utils/man/pdf.stats.Rd index ec4d297eaf1..7608980d4a9 100644 --- a/base/utils/man/pdf.stats.Rd +++ b/base/utils/man/pdf.stats.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/utils.R \name{pdf.stats} \alias{pdf.stats} -\title{Probability Distirbution Function Statistics} +\title{Probability Distribution Function Statistics} \usage{ pdf.stats(distn, A, B) } diff --git a/base/utils/man/r2bugs.distributions.Rd b/base/utils/man/r2bugs.distributions.Rd index 77d11fc8616..f900f747faa 100644 --- a/base/utils/man/r2bugs.distributions.Rd +++ b/base/utils/man/r2bugs.distributions.Rd @@ -15,9 +15,6 @@ r2bugs.distributions(priors, direction = "r2bugs") priors dataframe using JAGS default parameterizations } \description{ -convert R parameterizations to BUGS paramaterizations -} -\details{ R and BUGS have different parameterizations for some distributions. This function transforms the distributions from R defaults to BUGS defaults. BUGS is an implementation of the BUGS language, and these transformations are expected to work for bugs. } \examples{ diff --git a/base/utils/man/read.output.Rd b/base/utils/man/read.output.Rd index 89602be6a63..29e3dc3ae03 100644 --- a/base/utils/man/read.output.Rd +++ b/base/utils/man/read.output.Rd @@ -73,6 +73,7 @@ Carbon fluxes: GPP, NPP, NEE, TotalResp, AutoResp, HeteroResp, DOC_flux, Fire_flux, and Stem (Stem is specific to the BioCro model) and Water fluxes: Evaporation (Evap), Transpiration (TVeg), surface runoff (Qs), subsurface runoff (Qsb), and rainfall (Rainf). + For more details, see the \href{http://nacp.ornl.gov/MsTMIP_variables.shtml}{MsTMIP variables} documentation. 
} diff --git a/base/utils/man/retry.func.Rd b/base/utils/man/retry.func.Rd index ad501ab215e..7a3ff9216ef 100644 --- a/base/utils/man/retry.func.Rd +++ b/base/utils/man/retry.func.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/utils.R \name{retry.func} \alias{retry.func} -\title{retry.func} +\title{Retry function X times before stopping in error} \usage{ retry.func( expr, @@ -28,16 +28,13 @@ retval returns the results of the function call \description{ Retry function X times before stopping in error } -\details{ -Retry function X times before stopping in error -} \examples{ \dontrun{ - file_url <- paste0("https://thredds.daac.ornl.gov/", - "thredds/dodsC/ornldaac/1220", + file_url <- paste0("https://thredds.daac.ornl.gov/", + "thredds/dodsC/ornldaac/1220", "/mstmip_driver_global_hd_climate_lwdown_1999_v1.nc4") dap <- retry.func( - ncdf4::nc_open(file_url) + ncdf4::nc_open(file_url), maxErrors=10, sleep=2) } diff --git a/base/utils/man/rsync.Rd b/base/utils/man/rsync.Rd index b99d2a6b3fa..2beaa2f940b 100644 --- a/base/utils/man/rsync.Rd +++ b/base/utils/man/rsync.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/utils.R \name{rsync} \alias{rsync} -\title{rsync} +\title{R implementation of rsync} \usage{ rsync(args, from, to, pattern = "") } @@ -19,9 +19,6 @@ rsync(args, from, to, pattern = "") nothing, transfers files as a side effect } \description{ -R implementation of rsync -} -\details{ rsync is a file copying tool in bash } \author{ diff --git a/base/utils/man/ssh.Rd b/base/utils/man/ssh.Rd index bacc286f4d9..a7ad6d83503 100644 --- a/base/utils/man/ssh.Rd +++ b/base/utils/man/ssh.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/utils.R \name{ssh} \alias{ssh} -\title{SSH} +\title{R implementation of SSH} \usage{ ssh(host, ..., args = "") } diff --git a/base/utils/man/standard_vars.Rd b/base/utils/man/standard_vars.Rd index 7a3b7056d49..533139bc9d5 100644 --- a/base/utils/man/standard_vars.Rd +++ b/base/utils/man/standard_vars.Rd @@ -8,7 +8,7 @@ data frame, all columns character \describe{ \item{Variable.Name}{Short name suitable for programming with} -\item{standard_name}{Name used in the NetCDF \href{http://cfconventions.org/standard-names.html}{CF metadata conventions} } +\item{standard_name}{Name used in the NetCDF \href{http://cfconventions.org/Data/cf-standard-names/current/build/cf-standard-name-table.html}{CF metadata conventions} } \item{Units}{Standard units for this variable. Do not call variables by these names if they are in different units. See \code{ud_convert} for conversions to and from non-standard units} \item{Long.Name}{Human-readable variable name, suitable for e.g. 
axis labels} diff --git a/base/utils/man/summarize.result.Rd b/base/utils/man/summarize.result.Rd index 28ad6262524..f7cba06d5f1 100644 --- a/base/utils/man/summarize.result.Rd +++ b/base/utils/man/summarize.result.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/utils.R \name{summarize.result} \alias{summarize.result} -\title{Summarize Results} +\title{Summarize results of replicate observations in trait data query} \usage{ summarize.result(result) } diff --git a/base/utils/man/temp.settings.Rd b/base/utils/man/temp.settings.Rd index fa5e643ba98..af37b29f347 100644 --- a/base/utils/man/temp.settings.Rd +++ b/base/utils/man/temp.settings.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/utils.R \name{temp.settings} \alias{temp.settings} -\title{temp.settings} +\title{Create a temporary settings file} \usage{ temp.settings(settings.txt) } @@ -13,9 +13,6 @@ temp.settings(settings.txt) character vector written to and read from a temporary file } \description{ -Create a temporary settings file -} -\details{ Uses \code{\link{tempfile}} function to provide a valid temporary file (OS independent) Useful for testing functions that depend on settings file Reference: http://stackoverflow.com/a/12940705/199217 diff --git a/base/utils/man/timezone_hour.Rd b/base/utils/man/timezone_hour.Rd index 225ae05ae58..f63c6485c7b 100644 --- a/base/utils/man/timezone_hour.Rd +++ b/base/utils/man/timezone_hour.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/timezone_hour.R \name{timezone_hour} \alias{timezone_hour} -\title{timezone_hour} +\title{Timezone Hour} \usage{ timezone_hour(timezone) } @@ -13,7 +13,7 @@ timezone_hour(timezone) hours offset of the timezone } \description{ -return the number of hours offset to UTC for a timezone. +Returns the number of hours offset to UTC for a timezone. } \examples{ \dontrun{ diff --git a/base/utils/man/tryl.Rd b/base/utils/man/tryl.Rd index 79d0789c5b4..9d011f2e15a 100644 --- a/base/utils/man/tryl.Rd +++ b/base/utils/man/tryl.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/utils.R \name{tryl} \alias{tryl} -\title{tryl} +\title{Test if function gives an error} \usage{ tryl(FUN) } @@ -13,9 +13,6 @@ tryl(FUN) FALSE if function returns error; else TRUE } \description{ -Test if function gives an error -} -\details{ adaptation of try that returns a logical value (FALSE if error) } \examples{ diff --git a/base/utils/man/ud_convert.Rd b/base/utils/man/ud_convert.Rd index 869b18ee389..49622df0237 100644 --- a/base/utils/man/ud_convert.Rd +++ b/base/utils/man/ud_convert.Rd @@ -7,9 +7,11 @@ ud_convert(x, u1, u2) } \arguments{ -\item{x}{numeric vector} +\item{x}{vector of class "numeric" or "difftime"} -\item{u1}{string parseable as the units in which \code{x} is provided} +\item{u1}{string parseable as the units in which \code{x} is provided. If \code{x} is +class "difftime", then \code{u1} is not actually used. 
However, it still needs +to be supplied and needs to be convertible to \code{u2} for consistency.} \item{u2}{string parseable as the units to convert to} } diff --git a/base/utils/man/vecpaste.Rd b/base/utils/man/vecpaste.Rd index 23b8e4d92e4..66ff229cdd3 100644 --- a/base/utils/man/vecpaste.Rd +++ b/base/utils/man/vecpaste.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/utils.R \name{vecpaste} \alias{vecpaste} -\title{vecpaste} +\title{Convert vector to comma delimited string} \usage{ vecpaste(x) } @@ -13,8 +13,5 @@ vecpaste(x) comma delimited string } \description{ -Convert vector to comma delimited string -} -\details{ vecpaste, turns vector into comma delimited string fit for SQL statements. } diff --git a/base/utils/man/zero.bounded.density.Rd b/base/utils/man/zero.bounded.density.Rd index c2b423e2c17..0a795c7938f 100644 --- a/base/utils/man/zero.bounded.density.Rd +++ b/base/utils/man/zero.bounded.density.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/utils.R \name{zero.bounded.density} \alias{zero.bounded.density} -\title{Zero Bounded Density} +\title{Zero bounded density using log density transform} \usage{ zero.bounded.density(x, bw = "SJ", n = 1001) } @@ -17,9 +17,6 @@ zero.bounded.density(x, bw = "SJ", n = 1001) data frame with back-transformed log density estimate } \description{ -Zero bounded density using log density transform -} -\details{ Provides a zero bounded density estimate of a parameter. Kernel Density Estimation used by the \code{\link[stats]{density}} function will cause problems at the left hand end because it will put some weight on negative values. diff --git a/base/utils/scripts/metutils.R b/base/utils/scripts/metutils.R index 79b28047077..0d25fc23506 100644 --- a/base/utils/scripts/metutils.R +++ b/base/utils/scripts/metutils.R @@ -1,11 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- #################################################################################################### #/file # diff --git a/base/utils/scripts/time.constants.R b/base/utils/scripts/time.constants.R index 25387507d69..b6daeb31122 100644 --- a/base/utils/scripts/time.constants.R +++ b/base/utils/scripts/time.constants.R @@ -1,12 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- -#==========================================================================================# #==========================================================================================# # Time conversion units # #------------------------------------------------------------------------------------------# diff --git a/base/utils/tests/Rcheck_reference.log b/base/utils/tests/Rcheck_reference.log index 087e10096eb..025a87bfa66 100644 --- a/base/utils/tests/Rcheck_reference.log +++ b/base/utils/tests/Rcheck_reference.log @@ -12,38 +12,6 @@ Maintainer: ‘Rob Kooper ’ New submission -License components with restrictions and base license permitting such: - BSD_3_clause + file LICENSE -File 'LICENSE': - University of Illinois/NCSA Open Source License - - Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal with the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. - - Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR - ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - Strong dependencies not in mainstream repositories: PEcAn.logger, PEcAn.remote @@ -57,7 +25,6 @@ Found the following (possibly) invalid URLs: Status: 404 Message: Not Found -The Date field is over a month old. * checking package namespace information ... OK * checking package dependencies ... OK * checking if this is a source package ... OK @@ -89,11 +56,7 @@ The Date field is over a month old. * checking S3 generic/method consistency ... OK * checking replacement functions ... OK * checking foreign function calls ... OK -* checking R code for possible problems ... NOTE -mcmc.list2init: no visible binding for global variable ‘nr’ - -Undefined global functions or variables: - nr years yieldarray +* checking R code for possible problems ... OK * checking Rd files ... OK * checking Rd metadata ... OK * checking Rd line widths ... 
OK
@@ -108,10 +71,7 @@ See chapter ‘Writing R documentation files’ in the ‘Writing R
 Extensions’ manual.
* checking for code/documentation mismatches ... OK
* checking Rd \usage sections ... OK
-* checking Rd contents ... WARNING
-Argument items with no description in Rd object 'get.sa.sample.list':
-  ‘env’
-
+* checking Rd contents ... OK
* checking for unstated dependencies in examples ... OK
* checking contents of ‘data’ directory ... OK
* checking data for non-ASCII characters ... OK
diff --git a/base/utils/tests/testthat.R b/base/utils/tests/testthat.R
index 7c41d64906e..fe77b4f21d0 100644
--- a/base/utils/tests/testthat.R
+++ b/base/utils/tests/testthat.R
@@ -1,11 +1,3 @@
-#-------------------------------------------------------------------------------
-# Copyright (c) 2012 University of Illinois, NCSA.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the
-# University of Illinois/NCSA Open Source License
-# which accompanies this distribution, and is available at
-# http://opensource.ncsa.illinois.edu/license.html
-#-------------------------------------------------------------------------------
 library(testthat)
 library(PEcAn.utils)
diff --git a/base/utils/tests/testthat/data/config.example.php b/base/utils/tests/testthat/data/config.example.php
index 653cd64fcfb..fd2e5af4b41 100644
--- a/base/utils/tests/testthat/data/config.example.php
+++ b/base/utils/tests/testthat/data/config.example.php
@@ -16,10 +16,6 @@
 $db_fia_password="";
 $db_fia_database="";

-# browdog information
-$browndog_url="";
-$browndog_username="";
-$browndog_password="";

 # R binary
 $Rbinary="/usr/bin/R";
diff --git a/base/utils/tests/testthat/test-ud_convert.R b/base/utils/tests/testthat/test-ud_convert.R
index 2ef16d2a5ba..bd994f1fb6a 100644
--- a/base/utils/tests/testthat/test-ud_convert.R
+++ b/base/utils/tests/testthat/test-ud_convert.R
@@ -25,3 +25,15 @@ test_that("output is type numeric and not class \"units\"", {
   testthat::expect_type(x, "double")
 })
+
+test_that("ud_convert() handles difftime", {
+  x <- ud_convert(as.difftime("12:00:00"), u1 = "hours", u2 = "days")
+  expect_is(x, "numeric")
+  expect_equal(x, 0.5)
+})
+
+test_that("ud_convert() warns with wrong input units for difftime", {
+  expect_warning(ud_convert(as.difftime("12:00:00"), u1 = "years", u2 = "minutes"))
+  #should still error if units are not convertible
+  expect_error(ud_convert(as.difftime("12:00:00"), u1 = "kilograms", u2 = "minutes"))
+})
\ No newline at end of file
diff --git a/base/utils/tests/testthat/test.cf2date.R b/base/utils/tests/testthat/test.cf2date.R
new file mode 100644
index 00000000000..d45df781b9a
--- /dev/null
+++ b/base/utils/tests/testthat/test.cf2date.R
@@ -0,0 +1,23 @@
+test_that("`cf2datetime()` able to convert CF-style date-time to POSIXct date-time along with taking care of leap years", {
+  expect_equal(cf2datetime(5, "days since 1981-01-01"), as.POSIXct("1981-01-06", tz = "UTC"))
+  expect_equal(cf2datetime(27, "minutes since 1963-01-03 12:00:00 -05:00"), as.POSIXct("1963-01-03 17:27:00", tz = "UTC"))
+  # non-leap year
+  expect_equal(cf2datetime(365, "days since 1999-01-01"), as.POSIXct("2000-01-01", tz = "UTC"))
+  # leap year
+  expect_equal(cf2datetime(365, "days since 2000-01-01 12:00:00 -05:00"), as.POSIXct("2000-12-31 17:00:00", tz = "UTC"))
+})
+
+test_that("`datetime2cf()` able to convert POSIXct date-time to CF-style date-time", {
+  expect_equal(datetime2cf("1990-10-05", "days since 1990-01-01", tz = "UTC"), 277)
+  expect_equal(datetime2cf("1963-01-03 17:27:00", "minutes since 1963-01-03 12:00:00 -05:00", tz = "UTC"), 27)
+})
+
+test_that("`datetime2doy()` and `cf2doy()` able to extract Julian day from POSIXct or CF date-times respectively (cf2doy internally converts CF to POSIXct and calls datetime2doy)", {
+
+  # POSIXct date-times
+  expect_equal(datetime2doy("2010-01-01"), 1)
+  expect_equal(datetime2doy("2010-01-01 12:00:00"), 1.5)
+
+  # CF date-times
+  expect_equal(cf2doy(0, "days since 2007-01-01"), 1)
+})
\ No newline at end of file
diff --git a/base/utils/tests/testthat/test.clear.scratch.R b/base/utils/tests/testthat/test.clear.scratch.R
new file mode 100644
index 00000000000..1d4ca0ced1e
--- /dev/null
+++ b/base/utils/tests/testthat/test.clear.scratch.R
@@ -0,0 +1,24 @@
+test_that("`clear.scratch()` able to build the correct system command prompt to remove previous model run output", {
+  mocked_res <- mockery::mock(TRUE)
+  mockery::stub(clear.scratch, 'system', mocked_res)
+  mockery::stub(clear.scratch, 'seq', 0)
+  settings <- list(host = list(name = "cluster"))
+  expect_output(
+    clear.scratch(settings),
+    ".*Removing.*all.q@compute-0-0.local"
+  )
+  args <- mockery::mock_args(mocked_res)
+  expect_true(
+    grepl(
+      "ssh -T cluster qlogin -q all.q@compute-0-0.local.*clear.scratch.sh",
+      args[[1]][[1]]
+    )
+  )
+
+  # host name not cluster
+  settings <- list(host = list(name = "test"))
+  expect_output(
+    clear.scratch(settings),
+    ".*No output to delete.*"
+  )
+})
\ No newline at end of file
diff --git a/base/utils/tests/testthat/test.days_in_year.R b/base/utils/tests/testthat/test.days_in_year.R
new file mode 100644
index 00000000000..6b55cd20fd8
--- /dev/null
+++ b/base/utils/tests/testthat/test.days_in_year.R
@@ -0,0 +1,5 @@
+test_that("`days_in_year()` correctly returns number of days when provided a year or a vector of years", {
+  expect_equal(days_in_year(2010), 365)
+  expect_equal(days_in_year(2012), 366)
+  expect_equal(days_in_year(2010:2012), c(365, 365, 366))
+})
\ No newline at end of file
diff --git a/base/utils/tests/testthat/test.download.url.R b/base/utils/tests/testthat/test.download.url.R
new file mode 100644
index 00000000000..3180de253cc
--- /dev/null
+++ b/base/utils/tests/testthat/test.download.url.R
@@ -0,0 +1,12 @@
+test_that("`download.url()` able to create the target dir for file download and passes the correct args to curl_download", {
+  withr::with_dir(tempdir(), {
+    mocked_res <- mockery::mock(TRUE)
+    mockery::stub(download.url, 'url_found', TRUE)
+    mockery::stub(download.url, 'curl::curl_download', mocked_res)
+    res <- download.url('http://localhost/', 'test/index.html')
+    expect_true(file.exists('test'))
+    args <- mockery::mock_args(mocked_res)
+    expect_equal(args[[1]]$url, 'http://localhost/')
+    expect_equal(args[[1]]$destfile, 'test/index.html')
+  })
+})
\ No newline at end of file
diff --git a/base/utils/tests/testthat/test.get.ensemble.inputs.R b/base/utils/tests/testthat/test.get.ensemble.inputs.R
new file mode 100644
index 00000000000..025486c81ff
--- /dev/null
+++ b/base/utils/tests/testthat/test.get.ensemble.inputs.R
@@ -0,0 +1,16 @@
+test_that("`get.ensemble.inputs()` able to return desired ensemble inputs from settings", {
+  settings <- list(
+    run = list(
+      inputs = list(
+        input1 = c(1, 2, 3),
+        input2 = c("A", "B", "C"),
+        input3 = c(TRUE, FALSE, TRUE)
+      )
+    )
+  )
+  res <- get.ensemble.inputs(settings)
+  expect_equal(
+    res,
+    list(input1 = c(1, 2, 3), input2 = c(1, 2, 3), input3 = c(1, 2, 3))
+  )
+})
\ No newline at end of file
diff --git
a/base/utils/tests/testthat/test.listToArgString.R b/base/utils/tests/testthat/test.listToArgString.R new file mode 100644 index 00000000000..dfa1f996a92 --- /dev/null +++ b/base/utils/tests/testthat/test.listToArgString.R @@ -0,0 +1,19 @@ +test_that("`listToArgString()` able to format list of named function args in a comma separated list", { + expect_equal( + listToArgString(c(host = 'pecan', settings = 'test', id = 2020)), + "host='pecan', settings='test', id='2020'" + ) +}) + +test_that("`.parseArg()` works for all different types of entries in the list of function args passed to listToArgString", { + # character + expect_equal(.parseArg('pecan'), "'pecan'") + # NULL + expect_equal(.parseArg(NULL), "NULL") + # list + expect_equal(.parseArg(list(a = 1, b = 2)), "list(a='1', b='2')") + # data.frame + expect_equal(.parseArg(data.frame(a = 1, b = 2)), "data.frame(a =c(' 1 '),b =c(' 2 '))") + # nested list + expect_equal(.parseArg(list(a = 1, b = list(c = 3, d = 4))), "list(a='1', b=list(c='3', d='4'))") +}) \ No newline at end of file diff --git a/base/utils/tests/testthat/test.load_local.R b/base/utils/tests/testthat/test.load_local.R new file mode 100644 index 00000000000..16f35f848fe --- /dev/null +++ b/base/utils/tests/testthat/test.load_local.R @@ -0,0 +1,10 @@ +test_that("`load_local()` able to load file into a list", { + withr::with_tempfile("tf", { + x <- 1:10 + y <- 11:15 + save(x, y, file = tf) + test_list <- load_local(tf) + expect_equal(test_list$x, x) + expect_equal(test_list$y, y) + }) +}) \ No newline at end of file diff --git a/base/utils/tests/testthat/test.n_leap_day.R b/base/utils/tests/testthat/test.n_leap_day.R new file mode 100644 index 00000000000..0b6c55a4180 --- /dev/null +++ b/base/utils/tests/testthat/test.n_leap_day.R @@ -0,0 +1,9 @@ +test_that("`n_leap_day()` able to correctly return number of leap days between 2 specified dates", { + + # having leap days + expect_equal(n_leap_day("2000-01-01", "2003-12-31"), 1) + expect_equal(n_leap_day("2000-01-01", "2004-12-31"), 2) + + # no leap days + expect_equal(n_leap_day("2001-01-01", "2003-12-31"), 0) +}) \ No newline at end of file diff --git a/base/utils/tests/testthat/test.need_packages.R b/base/utils/tests/testthat/test.need_packages.R new file mode 100644 index 00000000000..6ae98f05a30 --- /dev/null +++ b/base/utils/tests/testthat/test.need_packages.R @@ -0,0 +1,11 @@ +test_that("`need_packages()` correctly checks if the required packages are installed", { + + # normal condition : when packages exist + expect_equal(need_packages("stats", "methods"), c("stats", "methods")) + + # error condition + expect_error( + need_packages("notapackage"), + "The following packages are required but not installed: `notapackage`" + ) +}) \ No newline at end of file diff --git a/base/utils/tests/testthat/test.r2bugs.distributions.R b/base/utils/tests/testthat/test.r2bugs.distributions.R new file mode 100644 index 00000000000..dcd414681fd --- /dev/null +++ b/base/utils/tests/testthat/test.r2bugs.distributions.R @@ -0,0 +1,19 @@ +test_that("`r2bugs.distributions()` able to convert R parameterization to BUGS parameterization", { + priors <- data.frame(distn = c('weibull', 'lnorm', 'norm', 'gamma'), + parama = c(1, 1, 1, 1), + paramb = c(2, 2, 2, 2)) + res <- r2bugs.distributions(priors) + expect_equal(res$distn, c("weib", "lnorm", "norm", "gamma")) + expect_equal(res$parama, c(1, 1, 1, 1)) + expect_equal(res$paramb, c(0.50, 0.25, 0.25, 2.00)) +}) + +test_that("`bugs2r.distributions()` able to convert BUGS parameterization to R 
parameterization", { + priors <- data.frame(distn = c('weib', 'lnorm', 'norm', 'gamma'), + parama = c(1, 1, 1, 1), + paramb = c(0.50, 0.25, 0.25, 2.00)) + res <- bugs2r.distributions(priors) + expect_equal(res$distn, c("weibull", "lnorm", "norm", "gamma")) + expect_equal(res$parama, c(1, 1, 1, 1)) + expect_equal(res$paramb, c(2, 2, 2, 2)) +}) \ No newline at end of file diff --git a/base/utils/tests/testthat/test.seconds_in_year.R b/base/utils/tests/testthat/test.seconds_in_year.R new file mode 100644 index 00000000000..c7db4e92d6a --- /dev/null +++ b/base/utils/tests/testthat/test.seconds_in_year.R @@ -0,0 +1,8 @@ +test_that("`seconds_in_year()` able to return number of seconds in a given year(also for a vector of years)", { + # leap year + expect_equal(seconds_in_year(2000), 31622400) + # non leap year + expect_equal(seconds_in_year(2001), 31536000) + # vector of years + expect_equal(seconds_in_year(2000:2004), c(31622400, 31536000, 31536000, 31536000, 31622400)) +}) \ No newline at end of file diff --git a/base/utils/tests/testthat/test.sendmail.R b/base/utils/tests/testthat/test.sendmail.R new file mode 100644 index 00000000000..990e0d394ae --- /dev/null +++ b/base/utils/tests/testthat/test.sendmail.R @@ -0,0 +1,18 @@ +test_that("`sendmail()` able to create the file with contents to email correctly, also able to build correct command to send the email", { + withr::with_tempfile("tf", { + mocked_res <- mockery::mock(TRUE) + mockery::stub(sendmail, 'system2', mocked_res) + mockery::stub(sendmail, 'tempfile', tf) + mockery::stub(sendmail, 'unlink', NULL) + sendmail('pecan@@example.com', 'carya@@example.com', 'Hi', 'Message from pecan.') + sendmailfile <- readLines(tf) + expect_equal(sendmailfile[1], 'From: pecan@@example.com') + expect_equal(sendmailfile[2], 'Subject: Hi') + expect_equal(sendmailfile[3], 'To: carya@@example.com') + expect_equal(sendmailfile[5], 'Message from pecan.') + args <- mockery::mock_args(mocked_res) + expect_equal(args[[1]][[2]][[1]], '-f') + expect_equal(args[[1]][[2]][[2]], '"pecan@@example.com"') + expect_equal(args[[1]][[2]][[3]], '"carya@@example.com"') + }) +}) \ No newline at end of file diff --git a/base/utils/tests/testthat/test.timezone_hour.R b/base/utils/tests/testthat/test.timezone_hour.R new file mode 100644 index 00000000000..e1ca8dd44d5 --- /dev/null +++ b/base/utils/tests/testthat/test.timezone_hour.R @@ -0,0 +1,7 @@ +test_that("`timezone_hour()` able to correctly return number of hours offset to UTC for a timezone", { + expect_equal(timezone_hour('US/Pacific'), -8) + expect_equal(timezone_hour('US/Eastern'), -5) + + # for numeric + expect_equal(timezone_hour(-8), -8) +}) \ No newline at end of file diff --git a/base/utils/tests/testthat/test.trait.dictionary.R b/base/utils/tests/testthat/test.trait.dictionary.R index 39da807c566..4330d518b65 100644 --- a/base/utils/tests/testthat/test.trait.dictionary.R +++ b/base/utils/tests/testthat/test.trait.dictionary.R @@ -1,12 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - test_that("trait dictionary loads and has expected columns",{ rm(list = ls()) data(trait.dictionary, package = "PEcAn.utils") diff --git a/base/utils/tests/testthat/test.units_are_equivalent.R b/base/utils/tests/testthat/test.units_are_equivalent.R new file mode 100644 index 00000000000..d5d8b4c4240 --- /dev/null +++ b/base/utils/tests/testthat/test.units_are_equivalent.R @@ -0,0 +1,6 @@ +test_that("`units_are_equivalent()` able to identify if the units are equivalent or not", { + # Equivalent units + expect_true(units_are_equivalent("m/s", "m s-1")) + # Non-equivalent units + expect_error(units_are_equivalent("m/s", "m s-2")) +}) \ No newline at end of file diff --git a/base/utils/tests/testthat/test.utils.R b/base/utils/tests/testthat/test.utils.R index 62112b39873..37478cba081 100644 --- a/base/utils/tests/testthat/test.utils.R +++ b/base/utils/tests/testthat/test.utils.R @@ -1,11 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- context("Other utilities") test.stats <- data.frame(Y=rep(1,5), @@ -161,3 +153,135 @@ test_that("mstmipvar works with args specified", { # "Don't know about variable banana in standard_vars in PEcAn.utils" # ) # }) + + +test_that("`left.pad.zeros()` able to add zeros to the left of a number based on `digits`", { + expect_equal(left.pad.zeros(123), "00123") + expect_equal(left.pad.zeros(42, digits = 3), "042") + expect_equal(left.pad.zeros(42, digits = 1), "42") +}) + +test_that("`zero.truncate()` able to truncate vector at zero", { + input <- c(1, NA, -3, NA, 5) + expect_equal(zero.truncate(input), c(1, 0, 0, 0, 5)) +}) + +test_that("`tabnum()` able to convert positive and negative numbers to `n` significant figures", { + + # case where n specified + x <- c(-2.345, 6.789) + result <- tabnum(x, 2) + expect_equal(result, c(-2.3, 6.8)) + + # case where n is default + result <- tabnum(3.5435) + expect_equal(result, 3.54) +}) + +test_that("`capitalize()` able to capitalize words in a sentence", { + # single word + expect_equal(capitalize("pecan"), "Pecan") + + # sentence with leading and trailing spaces + expect_equal(capitalize(" pecan project "), " Pecan Project ") +}) + +test_that("`bibtexify()` able to convert parameters passed to bibtex citation format", { + expect_equal(bibtexify("author", "1999", "Here Goes The Title"), "author1999HGTT") +}) + +test_that("`rsync()` able to correctly make the command passed to `system` function", { + mocked_res <- mockery::mock(0) + mockery::stub(rsync, 'system', mocked_res) + rsync(args = '-avz', from = 'pecan:test_src', to = 'pecan:test_des') + args <- mockery::mock_args(mocked_res) + expect_equal(args[[1]][[1]], "rsync -avz pecan:test_src pecan:test_des") +}) + +test_that("`ssh()` able to correctly make the command passed to `system` function", { + 
mocked_res <- mockery::mock(0) + mockery::stub(ssh, 'system', mocked_res) + ssh(host = 'pecan') + args <- mockery::mock_args(mocked_res) + expect_equal(args[[1]][[1]], "ssh -T pecan \"\" ") +}) + +test_that("`temp.settings()` able to create a temporary settings file", { + expect_equal(temp.settings(''), '') +}) + +test_that("`misc.convert()` able to unit conversions for known and unknown units to the function", { + + # units known to misc.convert + expect_equal(misc.convert(1, "kg C m-2 s-1", "umol C m-2 s-1"), 83259094) + # units not known to misc.convert + expect_equal(misc.convert(10, "kg", "g"), 10000) +}) + +test_that("`misc.are.convertible()` able to check if units are convertible by `misc.convert`", { + # units known to misc.convert + expect_true(misc.are.convertible("kg C m-2 s-1", "umol C m-2 s-1")) + # units known but not interconvertible + expect_false(misc.are.convertible("kg C m-2 s-1", "Mg ha-1")) + # units not known to misc.convert + expect_false(misc.are.convertible("kg", "g")) +}) + +test_that("`convert.expr()` able to convert expression to variable names", { + res <- convert.expr("a+b=c+d") + expect_equal(res$variable.drv, "a+b") + expect_equal(res$variable.eqn$variables, c("c", "d")) + expect_equal(res$variable.eqn$expression, "c+d") +}) + +test_that("`paste.stats()` able to print inputs to specific format(for building a Latex Table)", { + expect_equal(paste.stats(3.333333, 5.00001, 6.88888, n = 3), "$3.33(5,6.89)$") +}) + +test_that("`zero.bounded.density()` returns output containing required components", { + res <- zero.bounded.density(c(1, 2, 3)) + expect_true("x" %in% names(res)) + expect_true("y" %in% names(res)) +}) + +test_that("`pdf.stats()` able to calculate mean, variance statistics, and CI from a known distribution", { + expect_equal( + pdf.stats("beta", 1, 2), + unlist(list(mean = 0.33333333, var = 0.05555556, lcl = 0.01257912, ucl = 0.84188612)) + ) +}) + +test_that("`newxtable()` generates correct xtable object", { + data <- data.frame(A = c(1, 2, 3), B = c(4, 5, 6)) + expect_true(grepl("\\hline.*& A & B.*& 1.00 & 4.00.*& 2.00 & 5.00.*& 3.00 & 6.00", newxtable(data))) +}) + +test_that("`tryl()` able to check if a function gives an error when called", { + # case where function does not give an error + expect_true(tryl(1+1)) + + # case where function gives an error + expect_false(tryl(log("a"))) +}) + +test_that("`download_file()` able to correctly construct the inputs command to system function", { + mocked_res <- mockery::mock(0) + mockery::stub(download_file, 'system', mocked_res) + download_file("ftp://testpecan.com", "test", "ncftpget") + args <- mockery::mock_args(mocked_res) + expect_equal(args[[1]][[1]], "ncftpget -c ftp://testpecan.com > test") +}) + +test_that("`retry.func()` able to retry a function before returning an error", { + defaultW <- getOption("warn") + options(warn = -1) + on.exit(options(warn = defaultW)) + expect_error( + retry.func(ncdf4::nc_open("http://pecan"), maxErrors = 2, sleep = 2), + "retry: too many retries" + ) + + # case where function does not give an error + expect_equal(retry.func(1+1, maxErrors = 2, sleep = 2), 2) +}) + diff --git a/base/visualization/DESCRIPTION b/base/visualization/DESCRIPTION index 201bffdada4..8b38be4c664 100644 --- a/base/visualization/DESCRIPTION +++ b/base/visualization/DESCRIPTION @@ -1,8 +1,7 @@ Package: PEcAn.visualization Type: Package Title: PEcAn visualization functions -Version: 1.7.2 -Date: 2021-10-04 +Version: 1.8.0.9000 Authors@R: c(person("Mike", "Dietze", role = c("aut"), email = 
"dietze@bu.edu"), person("David", "LeBauer", role = c("aut", "cre"), @@ -31,25 +30,27 @@ Description: The Predictive Ecosystem Carbon Analyzer (PEcAn) is a scientific Imports: data.table, ggplot2, - maps, ncdf4 (>= 1.15), - PEcAn.DB, PEcAn.logger, - PEcAn.utils, plyr (>= 1.8.4), reshape2, rlang, stringr(>= 1.1.0) Suggests: grid, + knitr, + mockery, png, raster, + rmarkdown, sp, - testthat (>= 1.0.2) + testthat (>= 1.0.2), + withr License: BSD_3_clause + file LICENSE Copyright: Authors LazyLoad: yes LazyData: FALSE Encoding: UTF-8 -RoxygenNote: 7.2.3 +VignetteBuilder: knitr, rmarkdown +RoxygenNote: 7.3.2 Roxygen: list(markdown = TRUE) diff --git a/base/visualization/LICENSE b/base/visualization/LICENSE index 5a9e44128f1..09ef35a60b4 100644 --- a/base/visualization/LICENSE +++ b/base/visualization/LICENSE @@ -1,34 +1,3 @@ -## This is the master copy of the PEcAn License - -University of Illinois/NCSA Open Source License - -Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - -PEcAn project -www.pecanproject.org - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal with the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -- Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. -- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. -- Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - +YEAR: 2024 +COPYRIGHT HOLDER: PEcAn Project +ORGANIZATION: PEcAn Project, authors affiliations diff --git a/base/visualization/NEWS.md b/base/visualization/NEWS.md new file mode 100644 index 00000000000..04eb05d89a1 --- /dev/null +++ b/base/visualization/NEWS.md @@ -0,0 +1,11 @@ +# PEcAn.visualization 1.8.0.9000 + +## License change +* PEcAn.visualization is now distributed under the BSD three-clause license instead of the NCSA Open Source license. + + +# PEcAn.visualization 1.7.1 + +* All changes in 1.7.1 and earlier were recorded in a single file for all of + the PEcAn packages; please see + https://github.com/PecanProject/pecan/blob/v1.7.1/CHANGELOG.md for details. diff --git a/base/visualization/R/plot_netcdf.R b/base/visualization/R/plot_netcdf.R index 1dcdcac74cd..451e4d89f7f 100644 --- a/base/visualization/R/plot_netcdf.R +++ b/base/visualization/R/plot_netcdf.R @@ -1,12 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. 
-# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - # ---------------------------------------------------------------------- # PRIVATE FUNCTIONS # ---------------------------------------------------------------------- diff --git a/base/visualization/R/plots.R b/base/visualization/R/plots.R index f93f3eacf10..40bebb9510a 100644 --- a/base/visualization/R/plots.R +++ b/base/visualization/R/plots.R @@ -1,12 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - ##' Variable-width (diagonally cut) histogram ##' ##' When constructing a histogram, it is common to make all bars the same width. @@ -188,7 +179,7 @@ iqr <- function(x) { ##' ##' Used to add raw data or summary statistics to the plot of a distribution. ##' The height of Y is arbitrary, and can be set to optimize visualization. -##' If SE estimates are available, the se wil be plotted +##' If SE estimates are available, the SE will be plotted ##' @name plot_data ##' @aliases plot.data ##' @title Add data to plot @@ -263,7 +254,7 @@ plot_data <- function(trait.data, base.plot = NULL, ymax) { ##' ##' @return adds borders to ggplot as a side effect ##' @author Rudolf Cardinal -##' @author \url{ggplot2 google group}{https://groups.google.com/forum/?fromgroups#!topic/ggplot2/-ZjRE2OL8lE} +##' @author [ggplot2 google group](https://groups.google.com/forum/?fromgroups#!topic/ggplot2/-ZjRE2OL8lE) ##' @examples ##' \dontrun{ ##' df = data.frame( x=c(1,2,3), y=c(4,5,6) ) diff --git a/base/visualization/R/version.R b/base/visualization/R/version.R new file mode 100644 index 00000000000..0e58d885272 --- /dev/null +++ b/base/visualization/R/version.R @@ -0,0 +1,3 @@ +# Set at package install time, used by pecan.all::pecan_version() +# to identify development versions of packages +.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown") diff --git a/base/visualization/data/counties.RData b/base/visualization/data/counties.RData deleted file mode 100644 index 8c8f3436afd..00000000000 Binary files a/base/visualization/data/counties.RData and /dev/null differ diff --git a/base/visualization/data/yielddf.RData b/base/visualization/data/yielddf.RData deleted file mode 100644 index 4115c68d5c8..00000000000 Binary files a/base/visualization/data/yielddf.RData and /dev/null differ diff --git a/base/visualization/man/plot_data.Rd b/base/visualization/man/plot_data.Rd index b54ad1dbca8..37d741dbd0b 100644 --- a/base/visualization/man/plot_data.Rd +++ b/base/visualization/man/plot_data.Rd @@ -24,7 +24,7 @@ Add data to an existing plot or create a new one \details{ Used to add raw data or summary statistics to the plot of a distribution. The height of Y is arbitrary, and can be set to optimize visualization. 
-If SE estimates are available, the se wil be plotted +If SE estimates are available, the SE will be plotted } \examples{ \dontrun{plot_data(data.frame(Y = c(1, 2), se = c(1,2)), base.plot = NULL, ymax = 10)} diff --git a/base/visualization/man/theme_border.Rd b/base/visualization/man/theme_border.Rd index cc77306dc9b..a003e9ffea4 100644 --- a/base/visualization/man/theme_border.Rd +++ b/base/visualization/man/theme_border.Rd @@ -47,5 +47,5 @@ ggplot(data=df, aes(x=x, y=y)) + geom_point() + theme_bw() + \author{ Rudolf Cardinal -\url{ggplot2 google group}{https://groups.google.com/forum/?fromgroups#!topic/ggplot2/-ZjRE2OL8lE} +\href{https://groups.google.com/forum/?fromgroups#!topic/ggplot2/-ZjRE2OL8lE}{ggplot2 google group} } diff --git a/base/visualization/tests/testthat.R b/base/visualization/tests/testthat.R index d296184d0f0..461fd30776d 100644 --- a/base/visualization/tests/testthat.R +++ b/base/visualization/tests/testthat.R @@ -1,11 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- library(testthat) library(PEcAn.visualization) diff --git a/base/visualization/tests/testthat/data/urbana_subdaily_test.nc b/base/visualization/tests/testthat/data/urbana_subdaily_test.nc new file mode 100644 index 00000000000..3e7ceef3671 Binary files /dev/null and b/base/visualization/tests/testthat/data/urbana_subdaily_test.nc differ diff --git a/base/visualization/tests/testthat/test.add_icon.R b/base/visualization/tests/testthat/test.add_icon.R new file mode 100644 index 00000000000..d487b87714e --- /dev/null +++ b/base/visualization/tests/testthat/test.add_icon.R @@ -0,0 +1,7 @@ +test_that("`add_icon()` able to create the correct output file", { + withr::with_dir(tempdir(), { + add_icon(1, 2, 3) + # check if file exists + expect_true(file.exists("Rplots.pdf")) + }) +}) \ No newline at end of file diff --git a/base/visualization/tests/testthat/test.plot_data.R b/base/visualization/tests/testthat/test.plot_data.R new file mode 100644 index 00000000000..25264be0474 --- /dev/null +++ b/base/visualization/tests/testthat/test.plot_data.R @@ -0,0 +1,7 @@ +test_that("`plot_data()` able to create a new plot for data passed to it", { + withr::with_dir(tempdir(), { + res <- plot_data(data.frame(Y = c(1, 2), se = c(1,2), trt = c(1, 2)), base.plot = NULL, ymax = 10) + print(res) + expect_true(file.exists(paste0(getwd(), "/Rplots.pdf"))) + }) +}) \ No newline at end of file diff --git a/base/visualization/tests/testthat/test.plot_netcdf.R b/base/visualization/tests/testthat/test.plot_netcdf.R new file mode 100644 index 00000000000..c0e32252adc --- /dev/null +++ b/base/visualization/tests/testthat/test.plot_netcdf.R @@ -0,0 +1,21 @@ +test_that("`data.fetch()` able to return aggregated data with the correct label", { + nc <- ncdf4::nc_open("./data/urbana_subdaily_test.nc") + on.exit(ncdf4::nc_close(nc)) + + res <- data.fetch("time", nc, mean) + expect_equal(attr(res, "lbl"), "days since 1700-01-01T00:00:00Z") + + res <- data.fetch("air_temperature", nc, mean) + expect_equal(attr(res, "lbl"), "3-hourly Air Temperature at 2m in K") +}) + +test_that("`plot_netcdf()` able to 
correctly plot the netcdf file data and create the plot file at the desired location", { + nc <- ncdf4::nc_open("./data/urbana_subdaily_test.nc") + withr::with_dir(tempdir(), { + mockery::stub(plot_netcdf, 'ncdf4::nc_open', nc) + res <- plot_netcdf("./data/urbana_subdaily_test.nc", "time", "air_temperature", year = 2010) + + # check if file exists + expect_true(file.exists("Rplots.pdf")) + }) +}) \ No newline at end of file diff --git a/base/visualization/tests/testthat/test.viz.R b/base/visualization/tests/testthat/test.viz.R index 3dffbdd7ca8..d205843a58e 100644 --- a/base/visualization/tests/testthat/test.viz.R +++ b/base/visualization/tests/testthat/test.viz.R @@ -1,11 +1,3 @@ -## ------------------------------------------------------------------------------- -## Copyright (c) 2012 University of Illinois, NCSA. -## All rights reserved. This program and the accompanying materials -## are made available under the terms of the -## University of Illinois/NCSA Open Source License -## which accompanies this distribution, and is available at -## http://opensource.ncsa.illinois.edu/license.html -##------------------------------------------------------------------------------- context("Testing Visualization") diff --git a/base/visualization/vignettes/usmap.Rmd b/base/visualization/vignettes/usmap.Rmd index dff4e2b9ab4..c7d9a9ae0b3 100644 --- a/base/visualization/vignettes/usmap.Rmd +++ b/base/visualization/vignettes/usmap.Rmd @@ -1,7 +1,19 @@ +--- +title: "Maps" +output: html_vignette +vignette: > + %\VignetteIndexEntry{Maps} + %\VignetteEngine{knitr::rmarkdown} +--- + + + Map ======================================================== -```{r} +(all code chunks are disabled because vignette build was throwing errors. TODO: debug and re-enable.) + +```{r,eval=FALSE} require(raster) require(sp) require(ggplot2) @@ -20,7 +32,7 @@ spplot(spdf) ### Plot all maps for BETYdb -```{r} +```{r,eval=FALSE} files <- dir("~/dev/bety/local/modelout", pattern="grid.csv", full.names=TRUE) yieldfiles <- files[!grepl("evapotranspiration", files)] etfiles <- files[grepl("evapotranspiration", files)] @@ -42,7 +54,7 @@ for(file in etfiles){ ``` ### Misc additional code -```{r} +```{r,eval=FALSE} # Make an evenly spaced raster, the same extent as original data e <- extent( spdf ) @@ -63,7 +75,7 @@ ggplot( NULL ) + geom_raster( data = rdf , aes( x , y , fill = layer ) ) ``` -```{r} +```{r,eval=FALSE} # from http://gis.stackexchange.com/a/20052/3218 require(rgdal) proj4string(spdf) <- CRS("+init=epsg:4326") diff --git a/base/workflow/DESCRIPTION b/base/workflow/DESCRIPTION index 123dd17e3d7..252ac4d9fe7 100644 --- a/base/workflow/DESCRIPTION +++ b/base/workflow/DESCRIPTION @@ -1,9 +1,8 @@ Package: PEcAn.workflow Type: Package -Title: PEcAn functions used for ecological forecasts and - reanalysis -Version: 1.7.2 -Date: 2021-10-04 +Title: PEcAn Functions Used for Ecological Forecasts and + Reanalysis +Version: 1.8.0.9000 Authors@R: c(person("Mike", "Dietze", role = c("aut"), email = "dietze@bu.edu"), person("David", "LeBauer", role = c("aut", "cre"), @@ -18,9 +17,6 @@ Authors@R: c(person("Mike", "Dietze", role = c("aut"), person("Shawn", "Serbin", role = c("aut"), email = "sserbin@bnl.gov"), person("University of Illinois, NCSA", role = c("cph"))) -Author: David LeBauer, Mike Dietze, Xiaohui Feng, Dan Wang, - Mike Dietze, Carl Davidson, Rob Kooper, Shawn Serbin -Maintainer: David LeBauer Description: The Predictive Ecosystem Carbon Analyzer (PEcAn) is a scientific workflow management tool that is designed to simplify the 
management of model @@ -31,20 +27,21 @@ Description: The Predictive Ecosystem Carbon Analyzer that can be used to run the major steps of a PEcAn analysis. License: BSD_3_clause + file LICENSE Imports: - dplyr, - PEcAn.data.atmosphere, - PEcAn.data.land, - PEcAn.DB, - PEcAn.logger, - PEcAn.remote, - PEcAn.settings, - PEcAn.uncertainty, - PEcAn.utils, - purrr (>= 0.2.3), - XML + dplyr, + PEcAn.data.atmosphere, + PEcAn.data.land, + PEcAn.DB, + PEcAn.logger, + PEcAn.remote, + PEcAn.settings, + PEcAn.uncertainty, + PEcAn.utils, + purrr (>= 0.2.3), + XML Suggests: - testthat, - mockery + mockery, + testthat, + withr Copyright: Authors Encoding: UTF-8 -RoxygenNote: 7.2.3 +RoxygenNote: 7.3.2 diff --git a/base/workflow/LICENSE b/base/workflow/LICENSE index 9e38c2dc685..09ef35a60b4 100644 --- a/base/workflow/LICENSE +++ b/base/workflow/LICENSE @@ -1,29 +1,3 @@ -University of Illinois/NCSA Open Source License - -Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal with the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -- Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. -- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. -- Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - +YEAR: 2024 +COPYRIGHT HOLDER: PEcAn Project +ORGANIZATION: PEcAn Project, authors affiliations diff --git a/base/workflow/NEWS.md b/base/workflow/NEWS.md index de8f66d5a6a..f8ffcc70da1 100644 --- a/base/workflow/NEWS.md +++ b/base/workflow/NEWS.md @@ -1,4 +1,9 @@ -# PEcAn.workflow 1.7.2.9000 +# PEcAn.workflow 1.8.0.9000 + +* PEcAn.workflow is now distributed under the BSD 3-clause license instead of the NCSA Open Source license. 
+ + +# PEcAn.workflow 1.8.0 * New functions `start_model_runs` and `runModule_start_model_runs`, both moved from package `PEcAn.remote` (where they were `start.model.runs` and diff --git a/base/workflow/R/do_conversions.R b/base/workflow/R/do_conversions.R index 2acf93a4b06..50fd71e812e 100644 --- a/base/workflow/R/do_conversions.R +++ b/base/workflow/R/do_conversions.R @@ -73,10 +73,17 @@ do_conversions <- function(settings, overwrite.met = FALSE, overwrite.fia = FALS ## which is done locally in rundir and then rsync'ed to remote ## rather than having a model-format soils file that is processed remotely } + + # Phenology data extraction + if(input.tag == "leaf_phenology" && is.null(input$path)){ + #settings$run$inputs[[i]]$path <- PEcAn.data.remote::extract_phenology_MODIS(site_info,start_date,end_date,outdir,run_parallel = TRUE,ncores = NULL) + needsave <- TRUE + } + # met conversion if (input.tag == "met") { - name <- ifelse(is.null(settings$browndog), "MET Process", "BrownDog") + name <- "MET Process" if ( (PEcAn.utils::status.check(name) == 0)) { ## previously is.null(input$path) && PEcAn.logger::logger.info("calling met.process: ",settings$run$inputs[[i]][['path']]) settings$run$inputs[[i]] <- @@ -89,7 +96,6 @@ do_conversions <- function(settings, overwrite.met = FALSE, overwrite.fia = FALS host = settings$host, dbparms = settings$database$bety, dir = dbfiles, - browndog = settings$browndog, spin = settings$spin, overwrite = overwrite.met) PEcAn.logger::logger.debug("updated met path: ",settings$run$inputs[[i]][['path']]) diff --git a/base/workflow/R/run.write.configs.R b/base/workflow/R/run.write.configs.R index b4de2736d46..22ed6f3b729 100644 --- a/base/workflow/R/run.write.configs.R +++ b/base/workflow/R/run.write.configs.R @@ -40,10 +40,10 @@ run.write.configs <- function(settings, write = TRUE, ens.sample.method = "unifo if (is.na(posterior.files[i])) { ## otherwise, check to see if posteriorid exists if (!is.null(settings$pfts[[i]]$posteriorid)) { - #TODO: sometimes `files` is a 0x0 tibble and other operations with it fail. + #TODO: sometimes `files` is a 0x0 tibble and other operations with it fail. files <- PEcAn.DB::dbfile.check("Posterior", - settings$pfts[[i]]$posteriorid, - con, settings$host$name, return.all = TRUE) + settings$pfts[[i]]$posteriorid, + con, settings$host$name, return.all = TRUE) pid <- grep("post.distns.*Rdata", files$file_name) ## is there a posterior file? if (length(pid) == 0) { pid <- grep("prior.distns.Rdata", files$file_name) ## is there a prior file? @@ -53,8 +53,29 @@ run.write.configs <- function(settings, write = TRUE, ens.sample.method = "unifo } ## otherwise leave posteriors as NA } ## otherwise leave NA and get.parameter.samples will look for local - } - } + } else { + ## does posterior.files point to a directory instead of a file? 
+ if(utils::file_test("-d",posterior.files[i])){ + pfiles = dir(posterior.files[i],pattern = "post.distns.*Rdata",full.names = TRUE) + if(length(pfiles)>1){ + pid = grep("post.distns.Rdata",pfiles) + if(length(pid > 0)){ + pfiles = pfiles[grep("post.distns.Rdata",pfiles)] + } else { + PEcAn.logger::logger.error( + "run.write.configs: could uniquely identify posterior files within", + posterior.files[i]) + } + posterior.files[i] = pfiles + } + } + ## also, double check PFT outdir exists + if (is.null(settings$pfts[[i]]$outdir) || is.na(settings$pfts[[i]]$outdir)) { + ## no outdir + settings$pfts[[i]]$outdir <- file.path(settings$outdir, "pfts", settings$pfts[[i]]$name) + } + } ## end else + } ## end for loop over pfts ## Sample parameters model <- settings$model$type @@ -62,7 +83,18 @@ run.write.configs <- function(settings, write = TRUE, ens.sample.method = "unifo options(scipen = 12) PEcAn.uncertainty::get.parameter.samples(settings, posterior.files, ens.sample.method) - load(file.path(settings$outdir, "samples.Rdata")) + samples.file <- file.path(settings$outdir, "samples.Rdata") + if (file.exists(samples.file)) { + samples <- new.env() + load(samples.file, envir = samples) ## loads ensemble.samples, trait.samples, sa.samples, runs.samples, env.samples + trait.samples <- samples$trait.samples + ensemble.samples <- samples$ensemble.samples + sa.samples <- samples$sa.samples + runs.samples <- samples$runs.samples + ## env.samples <- samples$env.samples + } else { + PEcAn.logger::logger.error(samples.file, "not found, this file is required by the run.write.configs function") + } ## remove previous runs.txt if (overwrite && file.exists(file.path(settings$rundir, "runs.txt"))) { diff --git a/base/workflow/R/runModule.run.write.configs.R b/base/workflow/R/runModule.run.write.configs.R index fd168d6082b..265da4f6c1c 100644 --- a/base/workflow/R/runModule.run.write.configs.R +++ b/base/workflow/R/runModule.run.write.configs.R @@ -20,14 +20,14 @@ runModule.run.write.configs <- function(settings, overwrite = TRUE) { #check to see if there are posterior.files tags under pft - posterior.files.vec<-settings$pfts %>% + posterior.files <-settings$pfts %>% purrr::map(purrr::possibly('posterior.files', NA_character_)) %>% purrr::modify_depth(1, function(x) { ifelse(is.null(x), NA_character_, x) }) %>% unlist() - return(PEcAn.workflow::run.write.configs(settings, write, ens.sample.method, posterior.files = posterior.files.vec, overwrite = overwrite)) + return(PEcAn.workflow::run.write.configs(settings, write, ens.sample.method, posterior.files = posterior.files, overwrite = overwrite)) } else { stop("runModule.run.write.configs only works with Settings or MultiSettings") } diff --git a/base/workflow/R/start_model_runs.R b/base/workflow/R/start_model_runs.R index 9aabc8dd0e1..12ef228704e 100644 --- a/base/workflow/R/start_model_runs.R +++ b/base/workflow/R/start_model_runs.R @@ -1,12 +1,3 @@ -##------------------------------------------------------------------------------- -## Copyright (c) 2012 University of Illinois, NCSA. All rights reserved. 
This -## program and the accompanying materials are made available under the terms of -## the University of Illinois/NCSA Open Source License which accompanies this -## distribution, and is available at -## http://opensource.ncsa.illinois.edu/license.html -##------------------------------------------------------------------------------- - - #' Start selected ecosystem model runs within PEcAn workflow #' #' @param settings pecan settings object @@ -23,7 +14,7 @@ start_model_runs <- function(settings, write = TRUE, stop.on.error = TRUE) { run_file <- file.path(settings$rundir, "runs.txt") # check if runs need to be done - if (!file.exists(file.path(settings$rundir, "runs.txt"))) { + if (!file.exists(run_file)) { PEcAn.logger::logger.warn( "runs.txt not found, assuming no runs need to be done") return() @@ -292,7 +283,7 @@ start_model_runs <- function(settings, write = TRUE, stop.on.error = TRUE) { job_finished <- FALSE if (is_rabbitmq) { job_finished <- - file.exists(file.path(settings$modeloutdir, run, "rabbitmq.out")) + file.exists(file.path(jobids[run], "rabbitmq.out")) } else if (is_qsub) { job_finished <- PEcAn.remote::qsub_run_finished( run = jobids[run], @@ -301,10 +292,10 @@ start_model_runs <- function(settings, write = TRUE, stop.on.error = TRUE) { } if (job_finished) { - + # TODO check output log if (is_rabbitmq) { - data <- readLines(file.path(settings$modeloutdir, run, "rabbitmq.out")) + data <- readLines(file.path(jobids[run], "rabbitmq.out")) if (data[-1] == "ERROR") { msg <- paste("Run", run, "has an ERROR executing") if (stop.on.error) { @@ -372,4 +363,4 @@ runModule_start_model_runs <- function(settings, stop.on.error=TRUE) { PEcAn.logger::logger.severe( "runModule_start_model_runs only works with Settings or MultiSettings") } -} # runModule_start_model_runs +} # runModule_start_model_runs \ No newline at end of file diff --git a/base/workflow/R/version.R b/base/workflow/R/version.R new file mode 100644 index 00000000000..0e58d885272 --- /dev/null +++ b/base/workflow/R/version.R @@ -0,0 +1,3 @@ +# Set at package install time, used by pecan.all::pecan_version() +# to identify development versions of packages +.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown") diff --git a/base/workflow/tests/Rcheck_reference.log b/base/workflow/tests/Rcheck_reference.log index db508a93b02..68accd78d29 100644 --- a/base/workflow/tests/Rcheck_reference.log +++ b/base/workflow/tests/Rcheck_reference.log @@ -48,11 +48,6 @@ Strong dependencies not in mainstream repositories: PEcAn.data.atmosphere, PEcAn.data.land, PEcAn.DB, PEcAn.logger, PEcAn.remote, PEcAn.settings, PEcAn.uncertainty, PEcAn.utils -The Title field should be in title case. Current version is: -‘PEcAn functions used for ecological forecasts and reanalysis’ -In title case that is: -‘PEcAn Functions Used for Ecological Forecasts and Reanalysis’ - The Date field is over a month old. * checking package namespace information ... OK * checking package dependencies ... OK @@ -67,15 +62,7 @@ The Date field is over a month old. * checking installed package size ... OK * checking package directory ... OK * checking for future file timestamps ... OK -* checking DESCRIPTION meta-information ... 
NOTE
-Author field differs from that derived from Authors@R
-  Author: ‘David LeBauer, Mike Dietze, Xiaohui Feng, Dan Wang, Mike Dietze, Carl Davidson, Rob Kooper, Shawn Serbin’
-  Authors@R: ‘Mike Dietze [aut], David LeBauer [aut, cre], Xiaohui Feng [aut], Dan Wang [aut], Carl Davidson [aut], Rob Kooper [aut], Shawn Serbin [aut], University of Illinois, NCSA [cph]’
-
-Maintainer field differs from that derived from Authors@R
-  Maintainer: ‘David LeBauer ’
-  Authors@R: ‘David LeBauer ’
-
+* checking DESCRIPTION meta-information ... OK
 * checking top-level files ... OK
 * checking for left-over files ... OK
 * checking index information ... OK
@@ -93,14 +80,7 @@ Maintainer field differs from that derived from Authors@R
 * checking S3 generic/method consistency ... OK
 * checking replacement functions ... OK
 * checking foreign function calls ... OK
-* checking R code for possible problems ... NOTE
-run.write.configs: no visible binding for global variable
-  ‘trait.samples’
-run.write.configs: no visible binding for global variable ‘sa.samples’
-run.write.configs: no visible binding for global variable
-  ‘ensemble.samples’
-Undefined global functions or variables:
-  ensemble.samples sa.samples trait.samples
+* checking R code for possible problems ... OK
 * checking Rd files ... OK
 * checking Rd metadata ... OK
 * checking Rd line widths ... OK
@@ -119,4 +99,4 @@ Undefined global functions or variables:
 * checking for detritus in the temp directory ... OK
 * DONE
 
-Status: 1 WARNING, 2 NOTEs
+Status: 1 WARNING, 1 NOTE
diff --git a/base/workflow/tests/testthat/test.do_conversions.R b/base/workflow/tests/testthat/test.do_conversions.R
new file mode 100644
index 00000000000..a28a09667ff
--- /dev/null
+++ b/base/workflow/tests/testthat/test.do_conversions.R
@@ -0,0 +1,31 @@
+test_that("`do_conversions` able to return settings from pecan.METProcess.xml if it already exists", {
+  withr::with_tempdir({
+    settings <- list(host = list(name = 'test', folder = 'test'), outdir = getwd())
+    file_path <- file.path(getwd(), "pecan.METProcess.xml")
+    file.create(file_path)
+    writeLines(
+      "
+        test
+      ",
+      file_path
+    )
+    ret <- do_conversions(settings)
+    expect_equal(ret$outdir, "test")
+  })
+})
+
+test_that("`do_conversions` able to call met.process if the input tag has met, update the met path and save settings to pecan.METProcess.xml", {
+  withr::with_tempdir({
+    mocked_res <- mockery::mock(list(path = 'test'))
+    mockery::stub(do_conversions, 'PEcAn.data.atmosphere::met.process', mocked_res)
+    settings <- list(
+      host = list(name = 'test', folder = 'test'),
+      outdir = getwd(),
+      run = list(site = list(id = 0), inputs = list(met = list(id = 1)))
+    )
+    res <- do_conversions(settings)
+    mockery::expect_called(mocked_res, 1)
+    expect_equal(res$run$inputs$met$path, 'test')
+    expect_true(file.exists(file.path(getwd(), "pecan.METProcess.xml")))
+  })
+})
diff --git a/base/workflow/tests/testthat/test.start_model_runs.R b/base/workflow/tests/testthat/test.start_model_runs.R
new file mode 100644
index 00000000000..052950e2c93
--- /dev/null
+++ b/base/workflow/tests/testthat/test.start_model_runs.R
@@ -0,0 +1,20 @@
+test_that("`start_model_runs` throws a warning if runs.txt not provided", {
+  withr::with_tempdir({
+    PEcAn.logger::logger.setUseConsole(TRUE, FALSE)
+    on.exit(PEcAn.logger::logger.setUseConsole(TRUE, TRUE))
+    settings <- list(rundir = getwd())
+    expect_output(start_model_runs(settings), "runs.txt not found")
+  })
+})
+
+test_that("`start_model_runs` throws a warning if runs.txt is empty", {
+
withr::with_tempdir({ + PEcAn.logger::logger.setUseConsole(TRUE, FALSE) + on.exit(PEcAn.logger::logger.setUseConsole(TRUE, TRUE)) + settings <- list(rundir = getwd()) + file_path <- file.path(getwd(), "runs.txt") + file.create(file_path) + expect_output(start_model_runs(settings), "runs.txt found, but is empty") + }) +}) + diff --git a/book_source/01_introduction/01_project_overview.Rmd b/book_source/01_introduction/01_project_overview.Rmd index 847072590b9..d9b00767bfb 100644 --- a/book_source/01_introduction/01_project_overview.Rmd +++ b/book_source/01_introduction/01_project_overview.Rmd @@ -2,7 +2,7 @@ The Predictive Ecosystem Analyzer (PEcAn) is an integrated informatics toolbox for ecosystem modeling (Dietze et al. 2013, LeBauer et al. 2013). PEcAn consists of: -1. An application program interface (API) that encapsulates an ecosystem model, providing a common interface, inputs, and output. +1. An application program interface (API) that encapsulates an ecosystem model, providing a common interface, inputs, and output 2. Core utilities for handling and tracking model runs and the flows of information and uncertainties into and out of models and analyses 3. An accessible web-based user interface and visualization tools 4. An extensible collection of modules to handle specific types of analyses (sensitivity, uncertainty, ensemble), model-data syntheses (benchmarking, parameter data assimilation, state data assimilation), and data processing (model inputs and data constraints) diff --git a/book_source/02_demos_tutorials_workflows/01_install_pecan.Rmd b/book_source/02_demos_tutorials_workflows/01_install_pecan.Rmd index eb26d63e729..1d038862542 100644 --- a/book_source/02_demos_tutorials_workflows/01_install_pecan.Rmd +++ b/book_source/02_demos_tutorials_workflows/01_install_pecan.Rmd @@ -81,7 +81,7 @@ This will not go into much detail about about how to use Docker -- for more deta This should print the current version of docker-compose. We have tested the instruction below with versions of docker compose 1.22 and above. -3. **Download the PEcAn docker compose file**. It is located in the root directory of the [PEcAn source code](https://github.com/pecanproject/pecan). For reference, here are direct links to the [latest stable version](https://raw.githubusercontent.com/PecanProject/pecan/master/docker-compose.yml) and the [bleeding edge development version](https://raw.githubusercontent.com/PecanProject/pecan/develop/docker-compose.yml). (To download the files, you should be able to right click the link and select "Save link as".) Make sure the file is saved as `docker-compose.yml` in a directory called `pecan`. +3. **Download the PEcAn docker compose file**. It is located in the root directory of the [PEcAn source code](https://github.com/pecanproject/pecan). For reference, here are direct links to the [latest stable version](https://raw.githubusercontent.com/PecanProject/pecan/main/docker-compose.yml) and the [bleeding edge development version](https://raw.githubusercontent.com/PecanProject/pecan/develop/docker-compose.yml). (To download the files, you should be able to right click the link and select "Save link as".) Make sure the file is saved as `docker-compose.yml` in a directory called `pecan`. 4. **Initialize the PEcAn database and data images**. The following `docker compose` commands are used to download all the data PEcAn needs to start working. For more on how they work, see our [Docker topical pages](#pecan-docker-quickstart-init). 
diff --git a/book_source/02_demos_tutorials_workflows/02_user_demos/01_introductions_user.Rmd b/book_source/02_demos_tutorials_workflows/02_user_demos/01_introductions_user.Rmd index c9bdc0e81b9..2487ab7a9d3 100644 --- a/book_source/02_demos_tutorials_workflows/02_user_demos/01_introductions_user.Rmd +++ b/book_source/02_demos_tutorials_workflows/02_user_demos/01_introductions_user.Rmd @@ -13,16 +13,16 @@ The following Tutorials assume you have installed PEcAn. If you have not, please |Type|Title|Web Link| Source Rmd| |:--:|:---:|:------:|:---------:| |Demo| Basic Run| [html](#demo-1) | [Rmd](https://github.com/PecanProject/pecan/blob/develop/documentation/tutorials/01_Demo_Basic_Run/Demo01.Rmd)| -|Demo| Uncertainty Analysis| [html](#demo-2) | [Rmd](https://github.com/PecanProject/pecan/tree/master/documentation/tutorials/02_Demo_Uncertainty_Analysis)| +|Demo| Uncertainty Analysis| [html](#demo-2) | [Rmd](https://github.com/PecanProject/pecan/tree/main/documentation/tutorials/02_Demo_Uncertainty_Analysis)| |Demo| Output Analysis|html |[Rmd](https://github.com/PecanProject/pecan/tree/develop/documentation/tutorials/AnalyzeOutput)| |Demo| MCMC |html|[Rmd](https://github.com/PecanProject/pecan/tree/develop/documentation/tutorials/MCMC)| |Demo|Parameter Assimilation |html |[Rmd](https://github.com/PecanProject/pecan/tree/develop/documentation/tutorials/ParameterAssimilation)| |Demo|State Assimilation|html|[Rmd](https://github.com/PecanProject/pecan/tree/develop/documentation/tutorials/StateAssimilation)| |Demo| Sensitivity|html|[Rmd](https://github.com/PecanProject/pecan/tree/develop/documentation/tutorials/sensitivity)| -|Vignette|Allometries|html|[Rmd](https://github.com/PecanProject/pecan/blob/master/modules/allometry/vignettes/AllomVignette.Rmd)| -|Vignette|MCMC|html|[Rmd](https://github.com/PecanProject/pecan/blob/master/modules/assim.batch/vignettes/AssimBatchVignette.Rmd)| -|Vignette|Meteorological Data|html|[Rmd](https://github.com/PecanProject/pecan/tree/master/modules/data.atmosphere/vignettes)| -|Vignette|Meta-Analysis|html|[Rmd](https://github.com/PecanProject/pecan/blob/master/modules/meta.analysis/vignettes/single.MA_demo.Rmd)| -|Vignette|Photosynthetic Response Curves|html|[Rmd](https://github.com/PecanProject/pecan/blob/master/modules/photosynthesis/vignettes/ResponseCurves.Rmd)| -|Vignette|Priors|html|[Rmd](https://github.com/PecanProject/pecan/blob/master/modules/priors/vignettes/priors_demo.Rmd)| -|Vignette|Leaf Spectra:PROSPECT inversion|html|[Rmd](https://github.com/PecanProject/pecan/blob/master/modules/rtm/vignettes/pecanrtm.vignette.Rmd)| +|Vignette|Allometries|html|[Rmd](https://github.com/PecanProject/pecan/blob/main/modules/allometry/vignettes/AllomVignette.Rmd)| +|Vignette|MCMC|html|[Rmd](https://github.com/PecanProject/pecan/blob/main/modules/assim.batch/vignettes/AssimBatchVignette.Rmd)| +|Vignette|Meteorological Data|html|[Rmd](https://github.com/PecanProject/pecan/tree/main/modules/data.atmosphere/vignettes)| +|Vignette|Meta-Analysis|html|[Rmd](https://github.com/PecanProject/pecan/blob/main/modules/meta.analysis/vignettes/single.MA_demo.Rmd)| +|Vignette|Photosynthetic Response Curves|html|[Rmd](https://github.com/PecanProject/pecan/blob/main/modules/photosynthesis/vignettes/ResponseCurves.Rmd)| +|Vignette|Priors|html|[Rmd](https://github.com/PecanProject/pecan/blob/main/modules/priors/vignettes/priors_demo.Rmd)| +|Vignette|Leaf Spectra:PROSPECT inversion|html|[Rmd](https://github.com/PecanProject/pecan/blob/main/modules/rtm/vignettes/pecanrtm.vignette.Rmd)| 
diff --git a/book_source/02_demos_tutorials_workflows/02_user_demos/05_advanced_user_guide/93_workflow_curl.Rmd b/book_source/02_demos_tutorials_workflows/02_user_demos/05_advanced_user_guide/93_workflow_curl.Rmd
index e9c325eebfc..9bfd6ebeee3 100644
--- a/book_source/02_demos_tutorials_workflows/02_user_demos/05_advanced_user_guide/93_workflow_curl.Rmd
+++ b/book_source/02_demos_tutorials_workflows/02_user_demos/05_advanced_user_guide/93_workflow_curl.Rmd
@@ -33,8 +33,6 @@ sensitivity=-1,1
 pecan_edit=on
 # redirect to edit the model configuration files
 model_edit=on
-# use browndog
-browndog=on
 ```
 
 For example the following will run the above workflow. Using -v in curl will show verbose output (needed) and the grep will make sure it only shows the redirect. This will show the actual workflowid:
diff --git a/book_source/02_demos_tutorials_workflows/03_web_workflow.Rmd b/book_source/02_demos_tutorials_workflows/03_web_workflow.Rmd
index 997c4d5faa0..350fd2b27c5 100644
--- a/book_source/02_demos_tutorials_workflows/03_web_workflow.Rmd
+++ b/book_source/02_demos_tutorials_workflows/03_web_workflow.Rmd
@@ -200,7 +200,7 @@ At the moment, what is functional is a prototype workflow that works for invento
 This tool works with an internal copy of the FIA that is uploaded to a postGRES database along side BETY, however for space reasons this database does not ship with the PEcAn VM. To turn this feature on:
 
 1. Download and Install the FIA database. Instructions in [Installing data for PEcAn]
-2. For web-base runs, specify the database settings in the [config.php](https://github.com/PecanProject/pecan/blob/master/web/config.example.php)
+2. For web-based runs, specify the database settings in the [config.php](https://github.com/PecanProject/pecan/blob/main/web/config.example.php)
 3. For R-based runs, specify the database settings in the [THE PEcAn XML]
 
 More detailed information on how PEcAn processes inputs can be found on our [Input Conversion]page.
diff --git a/book_source/02_demos_tutorials_workflows/04_more_web_interface/00_intermediate_users_guide.Rmd b/book_source/02_demos_tutorials_workflows/04_more_web_interface/00_intermediate_users_guide.Rmd
index 2536efe60d7..0b778b14098 100644
--- a/book_source/02_demos_tutorials_workflows/04_more_web_interface/00_intermediate_users_guide.Rmd
+++ b/book_source/02_demos_tutorials_workflows/04_more_web_interface/00_intermediate_users_guide.Rmd
@@ -3,7 +3,6 @@
 This section will provide information to those wanting to take advantage of PEcAn's customizations from the web interface.
 
 * [Additional web configuration] - Advanced options available from the web interface
-    - [Brown Dog](#browndog)
     - [Sensitivity and ensemble analyses][Advanced Setup][TODO: Under construction...]
     - [Editing model configurations][TODO: Under construction...]
* [Settings-configured analyses] - Analyses only available by manually editing `pecan.xml`
diff --git a/book_source/02_demos_tutorials_workflows/04_more_web_interface/01_additional_web_config.Rmd b/book_source/02_demos_tutorials_workflows/04_more_web_interface/01_additional_web_config.Rmd
index 6e2c20c9905..b0bd053be7b 100644
--- a/book_source/02_demos_tutorials_workflows/04_more_web_interface/01_additional_web_config.Rmd
+++ b/book_source/02_demos_tutorials_workflows/04_more_web_interface/01_additional_web_config.Rmd
@@ -3,7 +3,6 @@
 Additional settings for web configuration:
 
 - [Web interface setup]{#intermediate-web-setup}
-- [Brown Dog]{#browndog}
 - [Advanced setup]{#intermediate-advanced-setup}
     - [Sensitivity analysis] (TODO)
     - [Uncertainty analysis] (TODO)
@@ -19,26 +18,16 @@ The list of configuration available
 
 1. **Database configuration** : BETYdb(Biofuel Ecophysiological Traits and Yields database) configuration details, can be edited according to need.
 
-2. **Browndog configuration** : Browndog configuration details, Used to connect browndog. Its included by default in VM.
+2. **FIA Database** : FIA (Forest Inventory and Analysis) database configuration details; can be used to add additional data to models.
 
-3. **FIA Database** : FIA(Forest Inventory and Analysis) Database configuration details, Can be used to add additional data to models.
+3. **Google MapKey** : Google Maps key, used by PEcAn to access Google Maps.
 
-4. **Google MapKey** : Google Map key, used to access the google map by PEcAn.
+4. **Change Password** : Brief information on changing the VM user password. (Does not work when using the Docker image.)
 
-5. **Change Password** : A small infomation to change the VM user password. (if using Docker image it won't work)
-
-6. **Automatic Sync** : If ON then it will sync the database between local machine and the remote servers. **Still unders testing part might be buggy**.
+5. **Automatic Sync** : If ON, it will sync the database between the local machine and the remote servers. **Still under testing; this part might be buggy**.
 
 Still work on the adding other editing feature going on, this page will be updated as new configuration will be available.
 
-### Brown Dog {#browndog}
-
-The Browndog service provides PEcAn with access to large and diverse sets of data at the click of a button in the format that PEcAn needs. By clicking the checkbox you will be using the Browndog Service to process data.
-
-For more information regarding meteorological data check out [Available Meteorological Drivers](#met-drivers).
-
-More information can be found at the [Browndog website](http://browndog.ncsa.illinois.edu/).
-
 ### Advanced Setup {#intermediate-advanced-setup}
 
 (TODO: Under construction...)
diff --git a/book_source/02_demos_tutorials_workflows/04_more_web_interface/02_hidden_analyses.Rmd b/book_source/02_demos_tutorials_workflows/04_more_web_interface/02_hidden_analyses.Rmd
index dd60e020fc6..9eaaed7a847 100644
--- a/book_source/02_demos_tutorials_workflows/04_more_web_interface/02_hidden_analyses.Rmd
+++ b/book_source/02_demos_tutorials_workflows/04_more_web_interface/02_hidden_analyses.Rmd
@@ -105,7 +105,7 @@ This is the main ensemble Kalman filter and generalized filter code. Originally,
 
 * IC - (optional) initial condition matrix (dimensions: ensemble memeber # by state variables). Default is NULL.
 
-* Q - (optional) process covariance matrix (dimensions: state variable by state variables). Defualt is NULL.
+* Q - (optional) process covariance matrix (dimensions: state variable by state variables). Default is NULL.
 
 #### State Data Assimilation Workflow
 Before running sda.enkf, these tasks must be completed (in no particular order),
@@ -182,7 +182,7 @@ Create diagnostics
 
 #### State Data Assimilation Tags Descriptions
 
-* **adjustment** : [optional] TRUE/FLASE flag for if ensembles needs to be adjusted based on weights estimated given their likelihood during analysis step. The defualt is TRUE for this flag.
+* **adjustment** : [optional] TRUE/FALSE flag for whether ensembles need to be adjusted based on weights estimated from their likelihood during the analysis step. The default is TRUE for this flag.
 * **process.variance** : [optional] TRUE/FLASE flag for if process variance should be estimated (TRUE) or not (FALSE). If TRUE, a generalized ensemble filter will be used. If FALSE, an ensemble Kalman filter will be used. Default is FALSE. If you use the TRUE argument you can set three more optional tags to control the MCMCs built for the generalized esnsemble filter.
 * **nitrGEF** : [optional] numeric defining the length of the MCMC chains.
 * **nthin** : [optional] numeric defining thining length for the MCMC chains.
@@ -289,14 +289,50 @@ $n_{t+1} = \frac{\sum_{i=1}^{I}\sum_{j=1}^{J}\frac{v_{ijt}^{2}+v_{iit}v_{jjt}}{V
 
 where we calculate the mean of the process covariance matrix $\left(\bar{\boldsymbol{Q}_{t}}\right)$ from the posterior samples at time t. Degrees of freedom for the Wishart are typically calculated element by element where $v_{ij}$ are the elements of $\boldsymbol{V}_{t}$. $I$ and $J$ index rows and columns of $\boldsymbol{V}$. Here, we calculate a mean number of degrees of freedom for $t+1$ by summing over all the elements of the scale matrix $\left(\boldsymbol{V}\right)$ and dividing by the count of those elements $\left(I\times J\right)$. We fit this model sequentially through time in the R computing environment using R package 'rjags.'
 
-Users have control over how they think is the best way to estimate $Q$. Our code will look for the tag `q.type` in the XML settings under `state.data.assimilation` which can take 3 values of Single, PFT or Site. If `q.type` is set to single then one value of process variance will be estimated across all different sites or PFTs. On the other hand, when q.type` is set to Site or PFT then a process variance will be estimated for each site or PFT at a cost of more time and computation power.
+Users can choose the way they think is best to estimate $Q$. Our code will look for the tag `q.type` in the XML settings under `state.data.assimilation`, which can take 4 values: Single, Site, Vector, or Wishart. If `q.type` is set to Single then one value of process variance will be estimated across all different sites and variables. When `q.type` is set to Site then a process variance will be estimated for each site, at a cost of more time and computation power. When `q.type` is set to Vector or Wishart then process errors for each variable of each site will be estimated and propagated through time; the Wishart Q additionally supports the estimation of covariance between sites and variables through MCMC sampling of Wishart distributions, which further supports the propagation of process error through not just time but also space and variables.
 
 #### Multi-site State data assimilation.
+`sda.enkf.multisite` is housed within: `/pecan/modules/assim.sequential/R`
+
+The 4-site tutorial is housed within: `~/pecan/modules/assim.sequential/inst/MultiSite-Exs`
+
+#### **sda.enkf.multisite.R Description**
 `sda.enkf.multisite` function allows for assimilation of observed data at multiple sites at the same time. In order to run a multi-site SDA, one needs to send a multisettings pecan xml file to this function. This multisettings xml file needs to contain information required for running at least two sites under `run` tag. The code will automatically run the ensembles for all the sites and reformats the outputs matching the required formats for analysis step.
 
+#### **sda.enkf.multisite.R Arguments**
+* settings - (required) [State Data Assimilation Tags Example] settings object
+
+* obs.mean - (required) List of date-times named by time point, each containing a list of sites named by site id, each containing the observation means for each state variable of that site at that time point.
+
+* obs.cov - (required) List of date-times named by time point, each containing a list of sites named by site id, each containing the observation covariances for all state variables of that site at that time point.
+
+* Q - (optional) Process covariance matrix given if there is no data to estimate it. Default is NULL.
+
+* restart - (optional) Used for iteratively updating previous forecasts. A list object that includes the file path to previous runs and the start date for SDA. Default is NULL.
+
+* pre_enkf_params - (optional) Used for carrying out SDA with pre-existing `enkf.params`, in which the `Pf`, `aqq`, and `bqq` can be used for the analysis step. Default is NULL.
+
+* ensemble.samples - (optional) Pass ensemble.samples from outside, which are lists of calibrated parameters for different plant functional types. This also helps to avoid GitHub check issues. Default is NULL.
+
+* control - (optional) List of flags controlling the behavior of the SDA. Here is an example of the `control` list; detailed explanations are given inside the `sda.enkf.multisite` function in the `assim.sequential` package.
+```
+control=list(trace = TRUE,
+             TimeseriesPlot = FALSE,
+             debug = FALSE,
+             pause = FALSE,
+             Profiling = FALSE,
+             OutlierDetection=FALSE,
+             parallel_qsub = TRUE,
+             send_email = NULL,
+             keepNC = TRUE,
+             run_parallel = TRUE,
+             MCMC.args = NULL)
+```
 
-The observed mean and cov needs to be formatted as list of different dates with observations. For each element of this list also there needs to be a list with mean and cov matrices of different sites named by their siteid. In case that zero variance was estimated for a variable inside the obs.cov, the SDA code will automatically replace that with half of the minimum variance from other non-zero variables in that time step.
+#### **obs.mean and obs.cov Description**
+The observations are required for passing the time points into the SDA workflow, and they should match the start and end dates in the settings object. To generate obs.mean and obs.cov, please use the `SDA_OBS_Assembler` function inside the `assim.sequential` package. For unconstrained runs, please specify the `free.run` flag as TRUE inside the `settings$state.data.assimilation$Obs_Prep` section. Otherwise, please specify the arguments needed for the preparation of the different observations (note that the observation preparation functions currently only support `MODIS LAI`, `Landtrendr AGB`, `SMAP soil moisture`, and `SoilGrid soil organic carbon`).
For details about how to set up those arguments, please reference the `Create_Multi_settings.R` script inside the `~/pecan/modules/assim.sequential/inst/MultiSite-Exs/SDA` directory.
+The observed mean and covariance need to be formatted as a list of dates on which observations are available. Each element of this list must itself be a list, named by site id, of the mean and covariance matrices for the different sites. In case zero variance is estimated for a variable inside obs.cov, the SDA code will automatically replace it with half of the minimum variance of the other non-zero variables in that time step.
 
-This would look like something like this:
+Here are examples of `obs.mean` and `obs.cov` for a single time point, two sites, and two observations.
 
 ```
 > obs.mean
@@ -324,34 +360,127 @@ $`2010/12/31`$`1000000651`
 [1,] 15.2821691 0.513584319
 [2,]  0.1213583 0.001162113
 ```
 
-An example of multi-settings pecan xml file also may look like below:
+#### Analysis SDA workflow
+Before running the SDA analysis functions, the ensemble forecast results have to be generated, and arguments such as the H matrix, MCMC arguments, and multi-site Y and R (by the `Construct.R` function) have to be generated as well. Here are the workflows for the three types of SDA analysis functions that are currently used.
+
+* Decide which analysis function to use. Here we have three options: 1) traditional ensemble Kalman Filter (`EnKF.MultiSite`) with analytical solution, within which the process error needs to be prescribed from outside (see the `Q` argument in the `sda.enkf.multisite` function); 2) generalized ensemble Kalman Filter (`GEF.MultiSite`); and 3) block-based generalized ensemble Kalman Filter (`analysis_sda_block`). The latter two methods support propagating process variance across space and time. To choose analysis method 1, we need to set `process.variance` to FALSE. Otherwise, if we set `process.variance` to TRUE and provide the `q.type` as either `SINGLE` or `SITE`, the method `GEF.MultiSite` will be used, and if we provide the `q.type` as either `vector` or `wishart`, the method `analysis_sda_block` will be used. The explanations for the different Q types can be found in the `The Generalized Ensemble Filter` section of this documentation. For the `analysis_sda_block` method, there is also a special case for completely or partially missing observations.
+
+* If we decide to use `EnKF.MultiSite`, the analysis results will be calculated analytically from the EnKF equations.
+
+* If we decide to use `GEF.MultiSite`, it will first censor the data based on how you set up the `censored.data` flag within the settings xml file. Then, if t equals 1, it will initialize `aqq`, `bqq`, `aq`, and `bq` based on how you set up the `q.type` argument within the settings xml file. After the initial conditions (ICs), data, and constants for the `GEF.MultiSite.Nimble` nimble model have been prepared, the MCMC sampling happens. Finally, the process variance and the analysis results will be calculated, updated, and returned to the `sda.enkf.multisite` function.
+
+* If we decide to use the `analysis_sda_block` method, then if t equals 1, the workflow will first build blocks using the `matrix_network` function, which calculates indexes of variables by networks of site groups based on the spatial scale (see the `scalef` argument in the `Example of multi-settings pecan xml file` section) that we specify inside the `state.data.assimilation` section.
Then, the workflow will execute `build.block.xy` to automatically initialize the overall block-based MCMC lists (`block.list.all`) and fill in datasets (`mu.f`, `y.censored`, or `Pf`) for each block and for all time points, to facilitate passing arguments between scripts/functions. After that, the `MCMC_args` (see the explanations inside the roxygen structure of the `sda.enkf.multisite` function) will be specified either from the `control` list or by default (see below). Then, the workflow will update the process error using the `update_q` function: if t equals 1, it will initialize the arguments for the process error; otherwise, it will grab the previously updated process error. After that, the `MCMC_Init` function will create the initial conditions (ICs) for the MCMC sampling based on randomly sampled data. Then, the completed block-based arguments (`block.list.all`) will be used by the `MCMC_block_function` function in parallel mode. Finally, the block-based results will be converted into long vectors and large matrices by the `block.2.vector` function, and values such as `block.list.all`, `mu.f`, `mu.a`, `Pf`, `Pa` will be returned to the `sda.enkf.multisite` function.
+
+```
+MCMC.args <- list(niter = 1e5,
+                  nthin = 10,
+                  nchain = 3,
+                  nburnin = 1e4)
+```
+
+* There is a special case for the `analysis_sda_block` method where NA values appear in the observations, which provides the opportunity to estimate process error without any observations. This special case currently works only under restricted conditions: setting `scalef` to 0 (that feature currently only works for isolated-site SDA runs) and turning on the `free.run` flag in the settings, which will then automatically fill in NA values for the observations at each site (see below).
+
+```
+ 
+  0
+  TRUE
+ 
+```
+
+
+#### Example of multi-settings pecan xml file
+Here is an example of what a multi-settings pecan xml file looks like. The detailed explanation of the xml file can be found under the `Multi-Settings` section in the `03_pecan_xml.Rmd` documentation.
+
 ```
-  FALSE
-  TRUE
-  
-   1000000040
-   1000013298
-  
-  
-   
-    GWBI
-    KgC/m^2
-    0
-    9999
-   
-   
-    AbvGrndWood
-    KgC/m^2
-    0
-    9999
-   
-  
-  1960/01/01
-  2000/12/31
- 
+  TRUE
+  1
+  1
+  FALSE
+  TRUE
+  FALSE
+  TRUE
+  FALSE
+  sipnet.out
+  SINGLE
+  FALSE
+  Local.support
+  1
+  5
+  
+   
+    AbvGrndWood
+    MgC/ha
+    0
+    9999
+   
+   
+    LAI
+    
+    0
+    9999
+   
+   
+    SoilMoistFrac
+    
+    0
+    100
+   
+   
+    TotSoilCarb
+    kg/m^2
+    0
+    9999
+   
+  
+  
+   
+    /projectnb/dietzelab/dongchen/Multi-site/download_500_sites/AGB
+    TRUE
+    TRUE
+    
+     year
+     1
+    
+   
+   
+    30
+    TRUE
+    TRUE
+    
+     year
+     1
+    
+   
+   
+    30
+    TRUE
+    FALSE
+    
+     year
+     1
+    
+   
+   
+    
+     year
+     1
+    
+   
+   /projectnb/dietzelab/dongchen/All_NEON_SDA/test_OBS
+   2012-07-15
+   2021-07-15
+  
+  
+   2004/01/01
+   2006/12/31
+  
+  1
+  2004/01/01
+  2006/12/31
+  -1
diff --git a/book_source/02_demos_tutorials_workflows/05_developer_workflows/02_git/01_using-git.Rmd b/book_source/02_demos_tutorials_workflows/05_developer_workflows/02_git/01_using-git.Rmd
index 3a25be86f07..c4054bde55b 100644
--- a/book_source/02_demos_tutorials_workflows/05_developer_workflows/02_git/01_using-git.Rmd
+++ b/book_source/02_demos_tutorials_workflows/05_developer_workflows/02_git/01_using-git.Rmd
@@ -48,7 +48,7 @@ We follow branch organization laid out on [this page](http://nvie.com/posts/a-su
 In short, there are three main branches you must be aware of:
 
 * **develop** - Main Branch containing the latest code. This is the main branch you will make changes to.
-* **master** - Branch containing the latest stable code. DO NOT MAKE CHANGES TO THIS BRANCH. +* **main** - Branch containing the latest stable code. DO NOT MAKE CHANGES TO THIS BRANCH. * **release/vX.X.X** - Named branches containing code specific to a release. Only make changes to this branch if you are fixing a bug on a release branch. #### Milestones, Issues, Tasks @@ -121,7 +121,7 @@ git remote add upstream git@github.com:PecanProject/pecan.git ##### Hint: Keeping your fork in sync -If you have used the instructions above, you can use the helper script called [`scripts/syncgit.sh`](https://github.com/PecanProject/pecan/blob/master/scripts/syncgit.sh) to keep the master and develop branches of your own fork in sync with the PEcAnProject/pecan repository. +If you have used the instructions above, you can use the helper script called [`scripts/syncgit.sh`](https://github.com/PecanProject/pecan/blob/main/scripts/syncgit.sh) to keep the main and develop branches of your own fork in sync with the PEcAnProject/pecan repository. After following the above, your .git/config file will include the following: @@ -144,7 +144,7 @@ Then, you can run: ./scripts/syncgit.sh ``` -Now the master and develop branches on your fork will be up to date. +Now the main and develop branches on your fork will be up to date. #### Using Branching @@ -199,7 +199,7 @@ git push origin #### After pull request is merged -1. Make sure you start in master +1. Make sure you start in main ```sh git checkout develop` diff --git a/book_source/02_demos_tutorials_workflows/05_developer_workflows/03_coding_practices/04-roxygen.Rmd b/book_source/02_demos_tutorials_workflows/05_developer_workflows/03_coding_practices/04-roxygen.Rmd index 2638bd8c0c7..a0e74052316 100644 --- a/book_source/02_demos_tutorials_workflows/05_developer_workflows/03_coding_practices/04-roxygen.Rmd +++ b/book_source/02_demos_tutorials_workflows/05_developer_workflows/03_coding_practices/04-roxygen.Rmd @@ -70,3 +70,24 @@ PEcAn's automated testing (Travis) will check if any documentation is out of dat These files were changed by the build process: {...} ``` + + + +#### Updating to a new Roxygen version + +For consistency across packages and machines, all PEcAn developers need to compile documentation with the same version of Roxygen. Roxygen itself will check for this and refuse to rebuild a package that was last touched by a newer version of Roxygen, but the warning it gives is very quiet and easy to miss. We take a louder approach by hardcoding the expected Roxygen version into PEcAn's Makefile and throwing a build failure if the installed Roxygen is not an exact match. + +When it is time for everyone to update to a newer Roxygen, follow the same procedure we used when updating from 7.2.3 to 7.3.1, replacing version strings as appropriate: + +* Before starting, work with the team to merge/close as many existing PRs as feasible -- this process touches a lot of files and is likely to create merge conflicts in other PRs. +* Edit the Makefile to change `EXPECTED_ROXYGEN_VERSION := 7.2.3` to `EXPECTED_ROXYGEN_VERSION := 7.3.1`. +* Run `make clean && make document` to be sure Roxygen has been run on all packages. +* Check the console output for warnings from Roxygen, and fix them as needed. New versions often get pickier about formatting issues that used to be considered minor. +* Run `./scripts/generate_dependencies.R` to update the version of Roxygen recorded as a Docker dependency. 
+* Grep the PEcAn folder for the string `7.2.3` to make sure no references were missed. + - e.g. this time I found a remaining `RoxygenNote: 7.2.3` in models/cable/DESCRIPTION -- Make currently skips cable, so I redocumented it manually. +* Review all changes. + - The changes should mostly just consist of updated `RoxygenNote:` lines in all the DESCRIPTION files. + - In all cases but extra-double-specially if any NAMESPACE files change, make sure you understand what happened rather than blindly committing the changes. Usually the new version is an improvement, but this is the time to check. +* Once all looks good, commit and push. +* Make a loud announcement, e.g. on Slack, to tell all developers to update roxygen2 on their machines as soon as the PR is merged. diff --git a/book_source/03_topical_pages/02_pecan_standards.Rmd b/book_source/03_topical_pages/02_pecan_standards.Rmd index 819f8df8fd5..35703a846ef 100644 --- a/book_source/03_topical_pages/02_pecan_standards.Rmd +++ b/book_source/03_topical_pages/02_pecan_standards.Rmd @@ -10,11 +10,11 @@ Internal PEcAn standard time follows ISO_8601 format for dates and time (https:/ To aid in the conversion between PEcAn internal ISO_8601 standard and CF convention used in all met drivers and PEcAn standard output you can utilize the functions: "cf2datetime","datetime2doy","cf2doy", and for SIPNET "sipnet2datetime" -### Input Standards +## Input Standards -#### Meterology Standards +### Meteorology Standards -##### Dimensions: +#### Dimensions: |CF standard-name | units | @@ -26,13 +26,14 @@ To aid in the conversion between PEcAn internal ISO_8601 standard and CF convent General Note: dates in the database should be date-time (preferably with timezone), and datetime passed around in PEcAn should be of type POSIXct. 
-##### The variable names should be `standard_name`
+#### Variable names should be `standard_name`
+
 ```{r, echo=FALSE,warning=FALSE,message=FALSE}
 names<-c("air_temperature", "air_temperature_max", "air_temperature_min", "air_pressure",
          "mole_fraction_of_carbon_dioxide_in_air", "moisture_content_of_soil_layer", "soil_temperature ",
-         "relative_humidity", "specific_humidity", "water_vapor_saturation_deficit","surface_downwelling_longwave_flux_in_air",
+         "relative_humidity", "specific_humidity", "water_vapor_saturation_deficit", "surface_downwelling_longwave_flux_in_air",
          "surface_downwelling_shortwave_flux_in_air", "surface_downwelling_photosynthetic_photon_flux_in_air",
-         "precipitation_flux", " ","wind_speed","eastward_wind", "northward_wind")
+         "precipitation_flux", " ", "wind_speed", "eastward_wind", "northward_wind")
 units <-c("K","K","K","Pa","mol/mol","kg m-2","K","%","1","Pa","W m-2","W m-2","mol m-2 s-1", "kg m-2 s-1", "degrees", "m/s", "m/s", "m/s")
@@ -45,7 +46,7 @@ narr <- c("air","tmax","tmin","","","","","rhum","shum","","dlwrf","dswrf","","a
 ameriflux <- c("TA(C)","" ,"" , "PRESS (KPa)","CO2","","TS1(NOT DONE)",
                "RH","CALC(RH)","VPD(NOT DONE)","Rgl","Rg","PAR(NOT DONE)","PREC","WD","WS","CALC(WS+WD)","CALC(WS+WD)")
-in_tab<-cbind(names,units,bety,isimip,cruncep,narr,ameriflux)
+in_tab <- cbind(names, units, bety, isimip, cruncep, narr, ameriflux)
 colnames(in_tab)<- c("CF standard-name","units","BETY","Isimip","CRUNCEP","NARR", "Ameriflux")
 if (require("DT")){
 datatable(in_tab, extensions = c('FixedColumns',"Buttons"),
@@ -53,10 +54,10 @@ datatable(in_tab, extensions = c('FixedColumns',"Buttons"),
           dom = 'Bfrtip',
           scrollX = TRUE,
           fixedColumns = TRUE,
-          buttons = c('copy', 'csv', 'excel', 'pdf', 'print')
-          
-          )
+          buttons = c('copy', 'csv', 'excel', 'pdf', 'print'),
+          escape = FALSE )
+  )
 }
 ```
@@ -66,8 +67,7 @@ datatable(in_tab, extensions = c('FixedColumns',"Buttons"),
 * standard_name is CF-convention standard names
 * units can be converted by udunits, so these can vary (e.g. the time denominator may change with time frequency of inputs)
 * soil moisture for the full column, rather than a layer, is soil_moisture_content
-* A full list of PEcAn standard variable names, units and dimensions can be found here: https://github.com/PecanProject/pecan/blob/develop/base/utils/data/standard_vars.csv
-
+* The list of PEcAn standard variable names, units and dimensions is provided in a table in the [Output Standards]{#OutputStandards} section and maintained in the file: [base/utils/data/standard_vars.csv](https://github.com/PecanProject/pecan/blob/develop/base/utils/data/standard_vars.csv).
 
 For example, in the [MsTMIP-CRUNCEP](https://www.betydb.org/inputs/280) data, the variable `rain` should be `precipitation_rate`. We want to standardize the units as well as part of the `met2CF.` step. I believe we want to use the CF "canonical" units but retain the MsTMIP units any time CF is ambiguous about the units.
@@ -76,17 +76,15 @@ The key is to process each type of met data (site, reanalysis, forecast, climate
 
 ### Soils and Vegetation Inputs
 
-##### Soil Data
-
-Check out the [Soil Data] section on more into on creating a standard soil data file.
-
-##### Vegetation Data
+#### Soil Data
 
-Check Out the [Vegetation Data] section on more info on creating a standard vegetation data file
+See the [Soil Data] section for more info on creating a standard soil data file.
+#### Vegetation Data
+See the [Vegetation Data] section for more info on creating a standard vegetation data file.
 
-### Output Standards
+## Output Standards {#OutputStandards}
 
 * created by `model2netcdf` functions
 * based on format used by [MsTMIP](http://nacp.ornl.gov/MsTMIP_variables.shtml)
diff --git a/book_source/03_topical_pages/03_pecan_xml.Rmd b/book_source/03_topical_pages/03_pecan_xml.Rmd
index aa2ccf8a89c..4e6e6715a6a 100644
--- a/book_source/03_topical_pages/03_pecan_xml.Rmd
+++ b/book_source/03_topical_pages/03_pecan_xml.Rmd
@@ -19,7 +19,6 @@ It contains the following major sections ("nodes"):
 - [`parameter.data.assimilation`](#xml-parameter-data-assimilation) -- Parameter data assimilation
 - [`multi.settings`](#xml-multi-settings) -- Multi Site Settings
 - (experimental) [`state.data.assimilation`](#xml-state-data-assimilation) -- State data assimilation
-- (experimental) [`browndog`](#xml-browndog) -- Brown Dog configuration
 - (experimental) [`benchmarking`](#xml-benchmarking) -- Benchmarking
 - [`remote_process`](#xml-remote_process) -- Remote data module
@@ -474,7 +473,7 @@ PEcAn tries to detect and throw informative errors when dates are out of bounds
 
 The following tags are optional run settings that apply to any model:
 
-* `jobtemplate`: the template used when creating a `job.sh` file, which is used to launch the actual model. Each model has its own default template in the `inst` folder of the corresponding R package (for instance, here is the one for [ED2](https://github.com/PecanProject/pecan/blob/master/models/ed/inst/template.job)). The following variables can be used: `@SITE_LAT@`, `@SITE_LON@`, `@SITE_MET@`, `@START_DATE@`, `@END_DATE@`, `@OUTDIR@`, `@RUNDIR@` which all come variables in the `pecan.xml` file. The following two command can be used to copy and clean the results from a scratch folder (specified as scratch in the run section below, for example local disk vs network disk) : `@SCRATCH_COPY@`, `@SCRATCH_CLEAR@`.
+* `jobtemplate`: the template used when creating a `job.sh` file, which is used to launch the actual model. Each model has its own default template in the `inst` folder of the corresponding R package (for instance, here is the one for [ED2](https://github.com/PecanProject/pecan/blob/main/models/ed/inst/template.job)). The following variables can be used: `@SITE_LAT@`, `@SITE_LON@`, `@SITE_MET@`, `@START_DATE@`, `@END_DATE@`, `@OUTDIR@`, `@RUNDIR@`, which all come from variables in the `pecan.xml` file. The following two commands can be used to copy and clean the results from a scratch folder (specified as scratch in the run section below, for example local disk vs network disk): `@SCRATCH_COPY@`, `@SCRATCH_CLEAR@`.
 
 * `stop_on_error`: (logical) Whether the workflow should immediately terminate if _any_ of the model runs fail. If unset, this defaults to `TRUE` unless you are running an ensemble simulation (and ensemble size is greater than 1).
@@ -749,8 +748,20 @@ The following tags can be used for state data assimilation. More detailed inform
 
 ```xml
 
-  FALSE
+  TRUE
+  1
+  1
   FALSE
+  TRUE
+  FALSE
+  TRUE
+  FALSE
+  sipnet.out
+  SINGLE
+  FALSE
+  Local.support
+  1
+  5
  
   AbvGrndWood
   KgC/m^2
   0
   9999
 
```
 * **process.variance** : [optional] TRUE/FLASE flag for if process variance should be estimated (TRUE) or not (FALSE). If TRUE, a generalized ensemble filter will be used. If FALSE, an ensemble Kalman filter will be used. Default is FALSE.
+* **aqq.Init** : [optional] The initial value of `aqq` used to estimate the Q distribution; the default value is 1. (Note that `aqq.Init` and `bqq.Init` currently only work with the `VECTOR` q type, and their variability across sites or variables is not accounted for, meaning `aqq` and `bqq` are initialized from a single value.)
+* **bqq.Init** : [optional] The initial value of `bqq` used to estimate the Q distribution; the default value is 1.
 * **sample.parameters** : [optional] TRUE/FLASE flag for if parameters should be sampled for each ensemble member or not. This allows for more spread in the intial conditions of the forecast.
+* **adjustment** : [optional] Boolean flag deciding whether to adjust analysis results by the likelihood.
+* **censored.data** : [optional] Boolean flag deciding whether to do MCMC sampling for the forecast ensemble space; the default is FALSE.
+* **FullYearNC** : [optional] Boolean flag deciding whether to generate the full-year netcdf file when there is an overlap in time; the default is TRUE.
+* **NC.Overwrite** : [optional] Boolean flag deciding whether to overwrite the previous netcdf file when there is an overlap in time; the default is FALSE.
+* **NC.Prefix** : [optional] The prefix for the generated full-year netcdf file; the default is sipnet.out.
+* **q.type** : [optional] The type of process variance that will be estimated; the default is SINGLE.
+* **free.run** : [optional] Whether this is a free run without any observations; the default is FALSE.
+* **Localization.FUN** : [optional] The name of the localization function used for the localization operation; the default is Local.support.
+* **scalef** : [optional] The scale parameter used for the localization operation; the smaller the value, the more isolated the sites.
+* **chains** : [optional] The number of MCMC chains to run during the sampling process.
 * **_NOTE:_** If TRUE, you must also assign a vector of trait names to pick.trait.params within the sda.enkf function.
 * **state.variable** : [required] State variable that is to be assimilated (in PEcAn standard format, with pre-specified variable name, unit, and range). Four variables can be assimilated so far: including Aboveground biomass (AbvGrndWood), LAI, SoilMoistFrac, and Soil carbon (TotSoilCarb).
 * **Obs_Prep** : [required] This section will be handled through the SDA_Obs_Assembler function, if you want to proceed with this function, this section is required.
@@ -948,27 +971,6 @@ For the MODIS LAI and SMAP soil moisture observations, the `search_window` speci
 
 * **run_parallel** : [optional] This tag defines if you want to proceed the MODIS LAI function parallely, the default value is FALSE.
 
-### (experimental) Brown Dog {#xml-browndog}
-
-This section describes how to connect to [Brown Dog](http://browndog.ncsa.illinois.edu). This facilitates processing and conversions of data.
-
-```xml
-
- ...
- ...
- ...
-
-```
-
-* `url`: (required) endpoint for Brown Dog to be used.
-* `username`: (optional) username to be used with the endpoint for Brown Dog.
-* `password`: (optional) password to be used with the endpoint for Brown Dog.
-
-This information is currently used by the following R functions:
-
-- `PEcAn.data.atmosphere::met.process` -- Generic function for processing meteorological input data.
-- `PEcAn.benchmark::load_data` -- Generic, versatile function for loading data in various formats.
-
 ### (experimental) Benchmarking {#xml-benchmarking}
 
 Coming soon...
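To make the `obs.mean`/`obs.cov` structure referenced throughout the state data assimilation documentation above concrete, here is a hedged R sketch; the site ids, variable names, and numbers are purely illustrative, loosely mirroring the printed example shown earlier in this patch:

```r
# hedged sketch: outer lists are keyed by date, inner lists by site id,
# as described in the SDA documentation above; all values illustrative
obs.mean <- list(
  "2010/12/31" = list(
    "1000000650" = data.frame(AbvGrndWood = 111.5, LAI = 1.3),
    "1000000651" = data.frame(AbvGrndWood = 114.6, LAI = 1.7)
  )
)
obs.cov <- list(
  "2010/12/31" = list(
    "1000000650" = matrix(c(19.7, 0.23, 0.23, 0.003), nrow = 2),
    "1000000651" = matrix(c(15.28, 0.12, 0.12, 0.001), nrow = 2)
  )
)
```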
diff --git a/book_source/03_topical_pages/04_R_workflow.Rmd b/book_source/03_topical_pages/04_R_workflow.Rmd
index e14aeab59f6..c1d8fed0145 100644
--- a/book_source/03_topical_pages/04_R_workflow.Rmd
+++ b/book_source/03_topical_pages/04_R_workflow.Rmd
@@ -22,6 +22,8 @@ Within the PEcAn repository, code pertaining to input conversion is in the MODUL

 ## Initial Conditions {#workflow-input-initial}

+### CONUS (NEON/FIA/BADM) Initial Conditions
+
 To convert initial condition data into the PEcAn Standard and then into the model formats we follow three main steps:

 1. Downloading vegetation info
@@ -52,6 +54,21 @@ This function will create ensemble member ncdf files by resampling the veg file

 put_veg_module() This function will convert the ensemble member ncdf files into model specific format. Currently the supported models are ED2 and SIPNET.

+### North America (NA) Initial Conditions
+
+To create initial condition files across North America, you will need to follow closely the script located at `~/pecan/modules/assim.sequential/inst/anchor/IC_prep_anchorSites.Rmd`. The script walks through these main steps:
+ 1. Load the `settings.xml` file and specify paths.
+ 2. Download/extract estimations of the four major carbon/water pools (leaf, wood, soil C, soil water) into by-site and by-ensemble tables.
+ 3. Do the unit conversions. For each ensemble of each site, prepare the `poolinfo` object consisting of the converted pool estimations.
+ 4. Write the NC files through the `PEcAn.SIPNET::veg2model.SIPNET` function.
+ 5. Within the loop, store the NC file paths in the settings object and rewrite the settings as XML to the destination.
+
+The script uses the following new datasets for the NA initial condition preparation:
+
+ 1. The leaf carbon is initialized with MODIS LAI observations and the SLA for the corresponding PFT.
+ 2. The above ground biomass (AGB) is initialized with the 2010 global AGB map (DOI: https://doi.org/10.3334/ORNLDAAC/1763).
+ 3. The soil moisture (SM) is initialized with the SM estimations starting from 1978 (DOI: 10.24381/cds.d7782f18).
+ 4. The soil organic carbon (SOC) is initialized with ISCN SOC estimations (data already prepared in PEcAn; use `PEcAn.data.land::iscn_soc` to load) based on the level 2 ecoregion map (pre-downloaded using the following link: https://www.epa.gov/eco-research/ecoregions).

 ## Meteorological Data {#workflow-met}
@@ -104,6 +121,12 @@ The main script that handles Met Processing, is [`met.process`](https://github.c

 ### Converting from PEcAn standard to model-specific format {#workflow-met-model}

+## Input Phenological Data {#workflow-input-phenology}
+
+To use MODIS phenology data (MODIS Land Cover Dynamics, MCD12Q2) to update the phenological parameters (leaf-on and leaf-off dates) at each restart timepoint:
+ 1. Generate the phenological parameter CSV file by running `PEcAn.data.remote::extract_phenology_MODIS` (see the R sketch below).
+ 2. Provide the generated phenological parameter CSV file to `settings$run$inputs$leaf_phenology$path`.
+
 ## Traits {#workflow-traits}

 (TODO: Under construction)
@@ -113,6 +136,7 @@ The main script that handles Met Processing, is [`met.process`](https://github.c

 (TODO: Under construction)

 ## Model Configuration {#workflow-modelconfig}
+To enable state data assimilation with sub-annual data, the default `conflict` in `model2netcdf` should be `TRUE`.
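To make the phenology recipe above concrete, here is a hedged sketch of the two steps. `extract_phenology_MODIS()` is the function named above, but the argument names shown here (site info, date range, output directory) are assumptions; check its help page for the actual signature.

```r
# Sketch of the MODIS phenology workflow described above; the argument names
# passed to extract_phenology_MODIS() are assumptions, not the verified API.
library(PEcAn.settings)

settings <- read.settings("pecan.xml")  # placeholder path

# Step 1: generate the phenological parameter CSV file (arguments assumed)
phenology_csv <- PEcAn.data.remote::extract_phenology_MODIS(
  site_info  = settings$run$site,
  start_date = settings$run$start.date,
  end_date   = settings$run$end.date,
  outdir     = settings$outdir
)

# Step 2: point the workflow at the generated CSV file
settings$run$inputs$leaf_phenology$path <- phenology_csv
write.settings(settings, outputfile = "pecan.xml")
```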
 (TODO: Under construction)
diff --git a/book_source/03_topical_pages/05_models/ed.Rmd b/book_source/03_topical_pages/05_models/ed.Rmd
index eaa9c46ca21..a94df3b9d45 100644
--- a/book_source/03_topical_pages/05_models/ed.Rmd
+++ b/book_source/03_topical_pages/05_models/ed.Rmd
@@ -171,7 +171,7 @@ ED2 is configured using 2 files which are placed in the run folder.
 * **ED2IN** : template for this file is located at models/ed/inst/ED2IN.\<revision\>. The values in this template that need to be modified are described below and are surrounded with @ symbols.
 * **config.xml** : this file is generated by PEcAn. Some values are stored in the pecan.xml in \<pfts\>\<pft\>\<constants\> section as well as in \<model\> section.

-An example of the template can be found in [ED2IN.r82](https://github.com/PecanProject/pecan/blob/master/models/ed/inst/ED2IN.r82)
+An example of the template can be found in [ED2IN.r82](https://github.com/PecanProject/pecan/blob/main/models/ed/inst/ED2IN.r82)

 The ED2IN template can contain the following variables. These will be replaced with actual values when the model configuration is written.
diff --git a/book_source/03_topical_pages/05_models/ldndc.Rmd b/book_source/03_topical_pages/05_models/ldndc.Rmd
index 287a6f7be6e..553977d4c16 100644
--- a/book_source/03_topical_pages/05_models/ldndc.Rmd
+++ b/book_source/03_topical_pages/05_models/ldndc.Rmd
@@ -10,73 +10,95 @@

 ### Introduction

-LandscapeDNDC is designed to simulate biosphere/hydrosphere processes of forest, arable and grassland ecosystems. It is modularly designed and therefore allows for various model selections to be plugged in. Most up-to-date information from LDNDC can be found here, as well as the authors, users guide and documentation of the model: https://ldndc.imk-ifu.kit.edu/index.php
+LandscapeDNDC is designed to simulate biosphere/hydrosphere processes of forest, arable and grassland ecosystems. The design of LDNDC allows different models/modules to be plugged in, enabling simulations of different ecosystems. The most up-to-date information on LDNDC, as well as the authors, user guide and documentation of the model, can be found here: https://ldndc.imk-ifu.kit.edu/index.php
+
+Please note! The PEcAn setup described here was written for the LDNDC version downloaded on 2022-10-19. Newer versions of LDNDC may differ in the names of variables and parameters.

 **PEcAn configuration file additions**

-The following sections of the PEcAn XML are relevant or have influence at the moment:
+The following sections of the PEcAn XML are relevant or have influence at this development stage:

 `ensemble`
-    `variable`: Options are only LAI and GPP
+    `variable`: LAI, GPP, NPP, NEE, Respirations (*AutoResp*, *HeteroResp*, *TotalResp*), harvested biomass (*harvest_carbon_flux*), AGB and BGB (*below_ground_carbon_content*).

 `model`
-    `id`: Corresponding the id of LDNDC in BETY
+    `id`: The id corresponding to LDNDC in BETY.

 `run`
    `inputs`
        `start_date`: Start date of the simulation
        `end_date`: End date of the simulation
+
+
+Paths to meteorological drivers, events and initial conditions. Paths to airchemistry and groundwater files may also be included, but are not required.

 ### Model specific input files

-LDNDC takes several files as an input, and can also generate multiple output files. These ńotes regarding input files are written from the PEcAn integration point of view, and detailed information about input files can be found from the user guide, which is available via the above links.
+LDNDC takes multiple input files, and can also generate multiple output files. These notes on input files are written from the PEcAn integration point of view; detailed information on input files can be found in the user guide, which is available via the links above.

 Input files:

-`Project` — Contains essential information in order to set-up the simulation run. For example, the file contains information of other files which will be used in the simulations as well as the time period when the simulations are taking a place.
+`Project` — Contains essential information in order to set up the simulation run. For example, the file contains paths to other files that will be used in the simulations, as well as the time period in which the simulations will take place.

 `Setup` — Defines which modules are used.

 `Climate` — Met data that is used in simulations.

-`Speciesparameters` — Species used in the simulations should be defined here and the file holds the approppriate information of the used species.
+`Speciesparameters` — Species used in the simulations should be defined here; the file contains the parametrization for the species.

-`Siteparameters` — Works similarly as species parameters, but in the site features point of view.
+`Siteparameters` — Works similarly to speciesparameters, but from the point of view of site parametrization.

 `Event` — Holds information about the events, which are essential in arable simulations.

-`Site` — Specific information about the site.
+`Site` — Specific information about the site (e.g. carbon and nitrogen contents, hydrological characteristics).

-`Airchemistry` — Information about the airchemistry.
+`Airchemistry` — Information about the air chemistry.

 #### Model configuration files

-Due to the amount of different input files, there are several templates which are used in model configuration.
-Templates are located at `models/ldndc/inst/` and are copied to the simulation's run directory before the model run.
-
-* **project.ldndc** : Project file is populated in `write.configs.LDNDC` based on the inputs of the PEcAn run. In addition to having the correct input files that are used in the simulation, the project file is filled in with the simulation time period and the directory paths regarding other input files that are read during the simulation. The file also sets the path for output files. These other input files are found from the same directory as the project file when starting the model run, and the output file is created among the other outputs of the simulation. At the moment, simulations are executed half-hourly.
+Due to the number of different input files, there are several templates that are used in the model configuration.
+The templates are located in `models/ldndc/inst/`. These templates will be populated with parametrizations and initial conditions when the configurations are written. The drivers and events, on the other hand, are specified by giving a path to the driver/event file in `pecan.xml`.

-* **template.job** : Will be filled in with the information that model execution requires in PEcAn. Calls for example models binary and executes the `project.ldndc` file.
+Many configurations for the model are (unsurprisingly) written in `write.configs.LDNDC`. This is the file that may need to be modified to make the model run appropriately for the simulated species.
Currently, there are a few crop species that the workflow recognizes (triticale, barley and oat), a couple of grass species (timothy and meadow) and one forest species (pipy). However, if other species are needed, it is relatively easy to modify the configuration code to add them so that the workflow can handle them. Also, the path for running the model binary is created in `write.configs.LDNDC` and will likely need some changes depending on the location of the model binaries/configurations on the user's server.

-* **events_template.xml** : Has three events (plant, manure and cut), which are filled in in `write.configs.LDNDC`. Events should be generated based on some event file, but such implementation has not been applied yet. This template is only meant for testing for now.
-* **speciesparameter_template.xml** : Has one replacable section. This section is filled in within `write.configs.LDNDC`, and is currently just hard-coded values for perennial grass species. This will be one of the key files, because the used pfts and corresponding pft values are written here. At this stage, it is hard-coded to only have one pft, but it should be generated based on the trait values that are available from BETY database.
+* **project.ldndc** : The project file is populated in `write.configs.LDNDC` based on the input from the PEcAn settings. In addition to containing the correct names for the input files used in the simulation, the project file contains the start and end points for the simulation period. The file also specifies the path where the output files will be written. The other input files should be in the same directory as the project file when the model run is started. The output directory is created amongst the other outputs of the simulation. Simulations are run half-hourly by default and this is hard-coded in the `write.configs.LDNDC` file.

-* **siteparameters.xml** : Some default value for siteparameters.
+* **template.job** : Will be filled in with the information needed to run the model in PEcAn. For example, it calls the model binary and executes the `project.ldndc` file.

-* **site.xml** : Some default values for site.
+* **speciesparameter_template.xml** : Has a replaceable section. This section is filled in within `write.configs.LDNDC`. This file uses the given prior values for the species. Currently, there are a few species hard-coded in `write.configs.LDNDC`; see the comment above.

-* **airchemistry.txt** : Some default data for airchemistry.
+* **siteparameters.xml** : This file is populated with the given site parameter prior values.

-* **climate.txt** : This file is created from the netcdf file. A symbolic link will be created into the run directory for it.
+* **site.xml** : This file is written based on the given initial condition file (netcdf). If no path is given in *poolinitcond*, then some default mineral clay soil settings will be written to this file. However, it is strongly recommended that some initial conditions based on the simulated site are provided.
Variables that can be included in the initial conditions file, but are not required:
+
+    *For each layer*
+    - pH (-),
+    - volume_fraction_of_water_in_soil_at_field_capacity (m3 m-3),
+    - volume_fraction_of_condensed_water_in_soil_at_wilting_point (m3 m-3),
+    - soil_nitrogen_content (kg kg-1),
+    - soil_carbon_content (kg kg-1),
+    - mass_fraction_of_clay_in_soil (kg kg-1),
+    - mass_fraction_of_sand_in_soil (kg kg-1),
+    - mass_fraction_of_silt_in_soil (kg kg-1),
+    - soil_density (kg m-3),
+    - soil_hydraulic_conductivity_at_saturation (m s-1),
+    - stratum (-) [number of strata in a soil layer]
+
+    *Single value*
+    - c2n_humus (ratio) [written to the siteparameter file]
+    - AGB (kg m-2) [written to the events file]
+    - fractional_cover (%) [written to the events file]
+
+    *Model specific*
+    - history (soil use history, e.g. arable)
+    - soil_type (e.g. ORMA, SALO)
+

-* **setup.xml** : Contains information which modules the model simulation is using. Default settings should fit for most of the purposes and these are currently hard-coded.
+* **setup.xml** : Contains information about which modules the model simulation is using. The default settings should be suitable for most purposes and are currently hard-coded. The setups differ for agricultural and forest sites.

 ### Installation notes

-Obtaining LDNDC program requires credentials, which the user is able to request from the developers of the model. With the credentials, the pre-compiled LDNDC program can be downloaded here: https://ldndc.imk-ifu.kit.edu/download/download-model.php
+In order to obtain the LDNDC model, credentials are required; the user can request them from the developers of the model. With the credentials, the pre-compiled LDNDC program can be downloaded here: https://ldndc.imk-ifu.kit.edu/download/download-model.php

-After obtaining the necessary files, the user should execute the install script that is found from the ldndc-'_version-number_' directory. With linux, executing would happen with command `sh install.sh`. Succesfull installation will create a `.ldndc` directory to the user's home directory. Running the simulations happens by calling the ldndc executable (found from the `/bin` directory) and giving the path file to the project file that contains the specs of the simulation. Instructions in detail and how to play with these setups are explained more broadly in the user guide.
\ No newline at end of file
+Once the necessary files have been obtained, the user should execute the installation script found in the ldndc-'_version-number_' directory. On Linux, this is done with the command `sh install.sh`. A successful installation will create a `.ldndc` directory in the user's home directory. (Note that this `.ldndc` directory path will be used in `write.configs.LDNDC`.) Running the simulations is done by calling the ldndc executable (found in the `/bin` directory) and giving the path to the project file containing the specs of the simulation. Detailed instructions, and how to play with these setups, can be found in the user guide.
\ No newline at end of file
diff --git a/book_source/03_topical_pages/09_standalone_tools.Rmd b/book_source/03_topical_pages/09_standalone_tools.Rmd
index f9ca555a796..9462799e87b 100644
--- a/book_source/03_topical_pages/09_standalone_tools.Rmd
+++ b/book_source/03_topical_pages/09_standalone_tools.Rmd
@@ -43,7 +43,7 @@ To load the Ameriflux data for the Harvard Forest (US-Ha1) site.
 bety = PEcAn.DB::betyConnect(php.config = "pecan/web/config.php")
 ```

-    where the complete path to the `config.php` is specified. See [here](https://github.com/PecanProject/pecan/blob/master/web/config.example.php) for an example `config.php` file.
+    where the complete path to the `config.php` is specified. See [here](https://github.com/PecanProject/pecan/blob/main/web/config.example.php) for an example `config.php` file.

 2. Look up the inputs record for the data in BETY.
diff --git a/book_source/03_topical_pages/11_adding_to_pecan.Rmd b/book_source/03_topical_pages/11_adding_to_pecan.Rmd
index 620c5f5e298..224e893d279 100644
--- a/book_source/03_topical_pages/11_adding_to_pecan.Rmd
+++ b/book_source/03_topical_pages/11_adding_to_pecan.Rmd
@@ -149,7 +149,7 @@ The new variable can be used to create a prior distribution for it as in the "Cr

 #### Setting up the module directory (required)

-PEcAn assumes that the interface modules are available as an R package in the models directory named after the model in question. The simplest way to get started on that R package is to make a copy the [_template_](https://github.com/PecanProject/pecan/tree/master/models/template) directory in the pecan/models folder and re-name it to the name of your model. In the code, filenames, and examples below you will want to substitute the word **MODEL** for the name of your model (note: R is case-sensitive).
+PEcAn assumes that the interface modules are available as an R package in the models directory named after the model in question. The simplest way to get started on that R package is to make a copy of the [_template_](https://github.com/PecanProject/pecan/tree/main/models/template) directory in the pecan/models folder and re-name it to the name of your model. In the code, filenames, and examples below you will want to substitute the word **MODEL** for the name of your model (note: R is case-sensitive).

 If you do not want to write the interface modules in R then it is fairly simple to set up the R functions described below to just call the script you want to run using R's _system_ command. Scripts that are not R functions should be placed in the _inst_ folder and R can look up the location of these files using the function _system.file_ which takes as arguments the _local_ path of the file within the package folder and the name of the package (typically PEcAn.MODEL). For example
@@ -697,9 +697,7 @@ This information is stored in a Format record in the bety database. Make sure to

 If the Format you are looking for is not available, you will need to create a new record. Before entering information into the database, you need to be able to answer the following questions about your data:

   - What is the file MIME type?
-      - We have a suit of functions for loading in data in open formats such as CSV, txt, netCDF, etc.
-      - PEcAn has partnered with the [NCSA BrownDog project](http://browndog.ncsa.illinois.edu/) to create a service that can read and convert as many data formats as possible. If your file type is less common or a proprietary type, you can use the [BrownDog DAP](http://dap.ncsa.illinois.edu/) to convert it to a format that can be used with PEcAn.
-      - If BrownDog cannot convert your data, you will need to contact us about writing a data specific load function.
+      - We have a suite of functions for loading in data in open formats such as CSV, txt, netCDF, etc.
   - What variables does the file contain?
   - What are the variables named?
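Picking up the `system.file` note above, a short sketch of the lookup-and-run pattern; `template.job` and `PEcAn.MODEL` are placeholders for whatever script and package name your model actually uses.

```r
# Locate a script shipped in the inst/ folder of an installed model package
# ("template.job" and "PEcAn.MODEL" are placeholder names).
script <- system.file("template.job", package = "PEcAn.MODEL")

# Call the non-R script through R's system command, as described above
system(paste("bash", script))
```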
diff --git a/book_source/03_topical_pages/12_troubleshooting-pecan.Rmd b/book_source/03_topical_pages/12_troubleshooting-pecan.Rmd
index 5d75b628b43..131eb071ec6 100755
--- a/book_source/03_topical_pages/12_troubleshooting-pecan.Rmd
+++ b/book_source/03_topical_pages/12_troubleshooting-pecan.Rmd
@@ -36,7 +36,7 @@ The next step is to add `debugonce()` before running the t

 This allows you to step through the function and evaluate the different objects as they are created and/or transformed.

-See [tests README](https://github.com/PecanProject/pecan/blob/master/tests/README.md) for more information.
+See [tests README](https://github.com/PecanProject/pecan/blob/main/tests/README.md) for more information.

@@ -44,7 +44,7 @@ See [tests README](https://github.com/PecanProject/pecan/blob/master/tests/READM

 The following scripts (in `qaqc/vignettes`) identify, respectively:

-1. [relationships among functions across packages](https://github.com/PecanProject/pecan/blob/master/qaqc/vignettes/function_relationships.Rmd)
-2. [function inputs and outputs](https://github.com/PecanProject/pecan/blob/master/qaqc/vignettes/module_output.Rmd) (e.g. that will identify which functions and outputs are used in a workflow).
+1. [relationships among functions across packages](https://github.com/PecanProject/pecan/blob/main/qaqc/vignettes/function_relationships.Rmd)
+2. [function inputs and outputs](https://github.com/PecanProject/pecan/blob/main/qaqc/vignettes/module_output.Rmd) (e.g. that will identify which functions and outputs are used in a workflow).

diff --git a/book_source/03_topical_pages/92_workflow_modules.Rmd b/book_source/03_topical_pages/92_workflow_modules.Rmd
index 2248287649b..4c6481a7f24 100644
--- a/book_source/03_topical_pages/92_workflow_modules.Rmd
+++ b/book_source/03_topical_pages/92_workflow_modules.Rmd
@@ -5,7 +5,7 @@ NOTE: As of PEcAn 1.2.6 -- needs to be updated significantly

 ## Overview

-Workflow inputs and outputs (click to open in new page, then zoom). Code used to generate this image is provided in [qaqc/vignettes/module_output.Rmd](https://github.com/PecanProject/pecan/blob/master/qaqc/vignettes/module_output.Rmd)
+Workflow inputs and outputs (click to open in new page, then zoom). Code used to generate this image is provided in [base/qaqc/vignettes/module_output.Rmd](https://github.com/PecanProject/pecan/blob/main/base/qaqc/vignettes/module_output.Rmd)

 [![PEcAn Workflow](http://isda.ncsa.illinois.edu/~kooper/EBI/workflow.svg)](http://isda.ncsa.illinois.edu/~kooper/EBI/workflow.svg)
diff --git a/book_source/03_topical_pages/93_installation/03_install_OS/04_Installing-PEcAn-OSX.Rmd b/book_source/03_topical_pages/93_installation/03_install_OS/04_Installing-PEcAn-OSX.Rmd
index d057ea41246..d48107a6cab 100755
--- a/book_source/03_topical_pages/93_installation/03_install_OS/04_Installing-PEcAn-OSX.Rmd
+++ b/book_source/03_topical_pages/93_installation/03_install_OS/04_Installing-PEcAn-OSX.Rmd
@@ -1,19 +1,20 @@
 ### Mac OSX {#macosx}

-These are specific notes for installing PEcAn on Mac OSX and will be referenced from the main [installing PEcAn](Installing-PEcAn) page. You will at least need to install the build environment and Postgres sections. If you want to access the database/PEcAn using a web browser you will need to install Apache. To access the database using the BETY interface, you will need to have Ruby installed.
+These are specific notes for installing PEcAn on Mac OSX and are referenced from the [installing PEcAn](Installing-PEcAn) page.
-This document also contains information on how to install the Rstudio server edition as well as any other packages that can be helpful.
+The build environment and Postgres sections are required to install and use PEcAn.
+Optional software includes Apache, Rails, and Rstudio: Apache is needed to run the BETYdb UI and the web-based version of PEcAn (both are optional), Rails is required to use the BETYdb web interface, and Rstudio is a commonly used IDE for R.

 #### Install build environment

-```bash
-# install R
-# download from http://cran.r-project.org/bin/macosx/
+##### Option 1: Download and install
+
+R: download from http://cran.r-project.org/bin/macosx/

-# install gfortran
-# download from http://cran.r-project.org/bin/macosx/tools/
+gfortran: download from http://cran.r-project.org/bin/macosx/tools/

+```bash
 # install OpenMPI
 curl -o openmpi-1.6.3.tar.gz http://www.open-mpi.org/software/ompi/v1.6/downloads/openmpi-1.6.3.tar.gz
 tar zxf openmpi-1.6.3.tar.gz
@@ -42,14 +43,63 @@ sudo make install
 cd ..
 ```

-#### Install Postgres
+##### Option 2: Homebrew
+
+```bash
+# R
+brew install --cask r
+# gfortran
+brew install gcc
+# OpenMPI
+brew install open-mpi
+# szip
+brew install szip
+# HDF5
+brew install hdf5
+## homebrew should configure hdf5 with fortran and cxx, otherwise:
+## brew install hdf5 --with-fortran --with-cxx
+```

-For those on a Mac I use the following app for postgresql which has
-postgis already installed (http://postgresapp.com/)
-To get postgis run the following commands in psql:
+#### Install Postgres and PostGIS
+
+##### Option 1: Postgres.app
+
+For MacOS, the Postgres.app provides Postgres with PostGIS
+already installed (http://postgresapp.com/).
+
+To run Postgres:
+* Open Postgres.app.
+* In the menu bar, click the elephant icon and select “Open psql”.
+
+##### Option 2: Install using Homebrew

 ```bash
+# optional: remove existing postgres installations with:
+# brew uninstall --force postgresql
+
+# install Postgres, fixed at v12 (officially supported by BETYdb):
+brew install postgresql@12
+brew pin postgresql@12
+
+# PostGIS
+brew install postgis
+
+# to run Postgres:
+brew services start postgresql@12
+```
+
+#### Enable PostGIS
+
+To enable PostGIS, first connect to Postgres with psql:
+
+```bash
+psql postgres
+```
+
+And then run the following commands:
+
+```sql
 ##### Enable PostGIS (includes raster)
 CREATE EXTENSION postgis;
 ##### Enable Topology
@@ -60,23 +110,42 @@ CREATE EXTENSION fuzzystrmatch;
 CREATE EXTENSION postgis_tiger_geocoder;
 ```

-To check your postgis run the following command again in psql: `SELECT PostGIS_full_version();`
+To check your PostGIS installation, run the following command again in psql:
+
+```sql
+SELECT PostGIS_full_version();
+```

 #### Additional installs

 ##### Install JAGS
-Download JAGS from http://sourceforge.net/projects/mcmc-jags/files/JAGS/3.x/Mac%20OS%20X/JAGS-Mavericks-3.4.0.dmg/download
+
+##### Option 1: Install using Homebrew
+
+```bash
+brew install jags
+```
+
+##### Option 2: Download
+
+Download JAGS from http://sourceforge.net/projects/mcmc-jags/files/JAGS/3.x/Mac%20OS%20X/JAGS-Mavericks-3.4.0.dmg/download.

 ##### Install udunits

-Installing udunits-2 on MacOSX is done from source.
+##### Option 1: Install using Homebrew
+
+```bash
+brew install udunits
+```
+
+##### Option 2: Install udunits-2 from source
 * download most recent [version of Udunits here](http://www.unidata.ucar.edu/downloads/udunits/index.jsp)
 * instructions for [compiling from source](http://www.unidata.ucar.edu/software/udunits/udunits-2/udunits2.html#Obtain)

-
 ```bash
 curl -o udunits-2.1.24.tar.gz ftp://ftp.unidata.ucar.edu/pub/udunits/udunits-2.1.24.tar.gz
 tar zxf udunits-2.1.24.tar.gz
@@ -86,7 +155,7 @@ make
 sudo make install
 ```

-#### Apache Configuration
+#### Apache Configuration (Optional)

 Mac does not support pdo/postgresql by default. The easiest way to install is to use: http://php-osx.liip.ch/
@@ -102,10 +171,24 @@ Alias /pecan ${PWD}/pecan/web
 EOF
 ```

-#### Ruby
+#### Ruby (Optional)
+
+_Note: it is recommended that BETYdb be run using Docker because the application uses unsupported versions of Ruby and Rails._
+
+The BETYdb application requires Ruby version 2.7.7, as specified in [PecanProject/bety/.ruby-version](https://github.com/PecanProject/bety/blob/develop/.ruby-version).
+
+```bash
+brew install rbenv
+rbenv init
+rbenv install 2.7.7
+```
+
+#### Rstudio (Optional)

-The default version of ruby should work. Or use [JewelryBox](https://jewelrybox.unfiniti.com/).
+For MacOS, you can download [Rstudio Desktop](http://www.rstudio.com/).

-#### Rstudio Server
+Or install using Homebrew:

-For the mac you can download [Rstudio Desktop](http://www.rstudio.com/).
+```bash
+brew install --cask rstudio
+```
diff --git a/book_source/03_topical_pages/93_installation/03_install_OS/05_install_BETY.Rmd b/book_source/03_topical_pages/93_installation/03_install_OS/05_install_BETY.Rmd
index 24739ba9154..a76c0ac3ad6 100644
--- a/book_source/03_topical_pages/93_installation/03_install_OS/05_install_BETY.Rmd
+++ b/book_source/03_topical_pages/93_installation/03_install_OS/05_install_BETY.Rmd
@@ -8,7 +8,7 @@ If you would like to install the Docker Version of BETY, please consult the [PEc

 #### Install Database + Data

-* _note_ To install BETYdb without PEcAn, first download the [`load.bety.sh` script](https://raw.githubusercontent.com/PecanProject/pecan/master/scripts/load.bety.sh)
+* _note_ To install BETYdb without PEcAn, first download the [`load.bety.sh` script](https://raw.githubusercontent.com/PecanProject/pecan/main/scripts/load.bety.sh)

 ```sh
 # install database (code assumes password is bety)
diff --git a/book_source/03_topical_pages/94_docker/02_quickstart.Rmd b/book_source/03_topical_pages/94_docker/02_quickstart.Rmd
index 94612ee13d4..d3014529b5f 100644
--- a/book_source/03_topical_pages/94_docker/02_quickstart.Rmd
+++ b/book_source/03_topical_pages/94_docker/02_quickstart.Rmd
@@ -35,11 +35,11 @@ curl -v -X POST \

 ### Configure docker-compose {#pecan-setup-compose-configure}

-This section will let you download some configuration files. The documentation provides links to the latest released version (master branch in GitHub) or the develop version that we are working on (develop branch in GitHub) which will become the next release. If you cloned the PEcAn GitHub repository you can use `git checkout <branch>` to switch branches.
+This section will let you download some configuration files. The documentation provides links to the latest released version (main branch in GitHub) or the develop version that we are working on (develop branch in GitHub) which will become the next release. If you cloned the PEcAn GitHub repository you can use `git checkout <branch>` to switch branches.

-The PEcAn Docker stack is configured using a `docker-compose.yml` file.
You can download just this file directly from GitHub [latest](https://raw.githubusercontent.com/PecanProject/pecan/master/docker-compose.yml) or [develop](https://raw.githubusercontent.com/PecanProject/pecan/master/docker-compose.yml). You can also find this file in the root of cloned PEcAn GitHub repository. There is no need to edit the `docker-compose.yml` file. You can use either the `.env` file to change some of the settings, or the `docker-compose.override.yml` file to modify the `docker-compose.yml` file. This makes it easier for you to get an updated version of the `docker-compose.yml` file and not lose any changes you have made to it.
+The PEcAn Docker stack is configured using a `docker-compose.yml` file. You can download just this file directly from GitHub [latest](https://raw.githubusercontent.com/PecanProject/pecan/main/docker-compose.yml) or [develop](https://raw.githubusercontent.com/PecanProject/pecan/develop/docker-compose.yml). You can also find this file in the root of cloned PEcAn GitHub repository. There is no need to edit the `docker-compose.yml` file. You can use either the `.env` file to change some of the settings, or the `docker-compose.override.yml` file to modify the `docker-compose.yml` file. This makes it easier for you to get an updated version of the `docker-compose.yml` file and not lose any changes you have made to it.

-Some of the settings in the `docker-compose.yml` can be set using a `.env` file. You can download either the [latest](https://raw.githubusercontent.com/PecanProject/pecan/master/docker/env.example) or the [develop](https://raw.githubusercontent.com/PecanProject/pecan/develop/docker/env.example) version. If you have cloned the GitHub repository it is also located in the docker folder. This file should be called `.env` and be placed in the same folder as your `docker-compose.yml` file. This file will allow you to set which version of PEcAn or BETY to use. See the comments in this file to control the settings. Option you might want to set are:
+Some of the settings in the `docker-compose.yml` can be set using a `.env` file. You can download either the [latest](https://raw.githubusercontent.com/PecanProject/pecan/main/docker/env.example) or the [develop](https://raw.githubusercontent.com/PecanProject/pecan/develop/docker/env.example) version. If you have cloned the GitHub repository it is also located in the docker folder. This file should be called `.env` and be placed in the same folder as your `docker-compose.yml` file. This file will allow you to set which version of PEcAn or BETY to use. See the comments in this file to control the settings. Options you might want to set are:

 - `PECAN_VERSION` : The docker images to use for PEcAn. The default is `latest` which is the latest released version of PEcAn. Setting this to `develop` will result in using the version of PEcAn which will become the next release.
 - `PECAN_FQDN` : Is the name of the server where PEcAn is running. This is what is used to register all files generated by this version of PEcAn (see also `TRAEFIK_HOST`).
@@ -48,7 +48,7 @@ Some of the settings in the `docker-compose.yml` can be set using a `.env` file.
 - `TRAEFIK_HOST` : Should be the FQDN of the server, this is needed when generating a SSL certificate. For SSL certificates you will need to set `TRAEFIK_ACME_ENABLE` as well as `TRAEFIK_ACME_EMAIL`.
 - `TRAEFIK_IPFILTER` : is used to limit access to certain resources, such as RabbitMQ and the Traefik dashboard.

-A final file, which is optional, is a `docker-compose.override.yml`.
You can download a version for the [latest](https://raw.githubusercontent.com/PecanProject/pecan/master/docker/docker-compose.example.yml) and [develop](https://raw.githubusercontent.com/PecanProject/pecan/develop/docker/docker-compose.example.yml) versions. If you have cloned the GitHub repository it is located in the docker folder. Use this file as an example of what you can do, only copy the pieces over that you really need. This will allow you to make changes to the docker-compose file for your local installation. You can use this to add additional containers to your stack, change the path where docker stores the data for the containers, or you can use this to open up the postgresql port. +A final file, which is optional, is a `docker-compose.override.yml`. You can download a version for the [latest](https://raw.githubusercontent.com/PecanProject/pecan/main/docker/docker-compose.example.yml) and [develop](https://raw.githubusercontent.com/PecanProject/pecan/develop/docker/docker-compose.example.yml) versions. If you have cloned the GitHub repository it is located in the docker folder. Use this file as an example of what you can do, only copy the pieces over that you really need. This will allow you to make changes to the docker-compose file for your local installation. You can use this to add additional containers to your stack, change the path where docker stores the data for the containers, or you can use this to open up the postgresql port. ```yaml version: "3" diff --git a/book_source/03_topical_pages/94_docker/04_models.Rmd b/book_source/03_topical_pages/94_docker/04_models.Rmd index 244cda3ba5c..8054671922e 100644 --- a/book_source/03_topical_pages/94_docker/04_models.Rmd +++ b/book_source/03_topical_pages/94_docker/04_models.Rmd @@ -55,7 +55,7 @@ It is important values for `type` and `version` are set correct. The PEcAn code To build the docker image, we use a Dockerfile (see example below) and run the following command. This command will expect the Dockerfile to live in the model specific folder and the command is executed in the root pecan folder. It will copy the content of the pecan folder and make it available to the build process (in this example we do not need any additional files). 
-Since we can have multiple different versions of a model be available for PEcAn we ar using the following naming schema `pecan/model--:-:= timeout) { - return(NA) - } - f <- CFILE(file, mode = "wb") - curlPerform(url = url, writedata = f@ref, .opts = .opts) - RCurl::close(f) - - return(file) -} # download.browndog - -type <- "NARR" -site <- "US-NR1" -site_lat <- "40.0329" -site_lon <- "-105.546" -startDate <- "2001-01-01 00:00:00" -endDate <- "2001-12-31 23:59:59" -browndog <- "http://host/path" -userpass <- "user:password" -output <- "clim" - -outputfile <- paste(site, output, sep = ".") - -xmldata <- paste0("", - "", type, "", - "", site, "", - "", site_lat, "", - "", site_lon, "", - "", startDate, "", - "", endDate, "", - "") - -# post to browndog -curloptions <- list(userpwd = userpass, httpauth = 1L, followlocation = TRUE) -result <- postForm(paste0(browndog, output, "/"), fileData = fileUpload("pecan.xml", xmldata, "text/xml"), - .opts = curloptions) -url <- gsub(".*(.*).*", "\\1", result) -download.browndog(url, outputfile, 120, curloptions) diff --git a/docker-compose.dev.yml b/docker-compose.dev.yml index 0eed697467e..38e7cb8663e 100644 --- a/docker-compose.dev.yml +++ b/docker-compose.dev.yml @@ -2,8 +2,6 @@ # # docker-compose -f docker-compose.yml -f docker-compose.dev.yml -version: '3.2' - services: # web application. This expects the config.php to be copied from docker/web @@ -16,14 +14,14 @@ services: executor: volumes: - 'pecan_home:/pecan/' - - 'pecan_lib:/usr/local/lib/R/site-library/' + - 'R_library:/usr/local/lib/R/site-library/' # use same for R development in rstudio rstudio: volumes: - 'pecan_home:/pecan/' - 'pecan_home:/home/${PECAN_RSTUDIO_USER:-carya}/pecan/' - - 'pecan_lib:/usr/local/lib/R/site-library/' + - 'R_library:/usr/local/lib/R/site-library/' # add route to api for help when debugging plumber application. Same can # be used for other apps. #labels: @@ -35,7 +33,7 @@ services: # this can be used if you are changng the code for a model in PEcAN sipnet: volumes: - - 'pecan_lib:/usr/local/lib/R/site-library/' + - 'R_library:/usr/local/lib/R/site-library/' # this will open postgres to the hostcompute #postgres: @@ -79,10 +77,10 @@ volumes: type: none device: '${PWD}/web/' o: bind - pecan_lib: + R_library: # driver_opts: # type: none - # device: '${HOME}/volumes/pecan/lib' + # device: '${HOME}/volumes/pecan/R_library' # o: bind #pecan: # driver_opts: diff --git a/docker-compose.https.yml b/docker-compose.https.yml index 13214e7cab5..a2dd561be14 100644 --- a/docker-compose.https.yml +++ b/docker-compose.https.yml @@ -1,5 +1,4 @@ # Use this file to enable https -version: "3.2" services: traefik: diff --git a/docker-compose.prod.yml b/docker-compose.prod.yml new file mode 100644 index 00000000000..6de05893cf8 --- /dev/null +++ b/docker-compose.prod.yml @@ -0,0 +1,288 @@ +services: + + # webserver to handle all traffic. This can use let's encrypt to generate a SSL cert. 
+ traefik: + image: "traefik:v2.9" + command: + - --log.level=INFO + - --api=true + - --api.dashboard=true + - --api.insecure=true + # Entrypoints + - --entrypoints.web.address=:80 + # Docker setup + - --providers.docker=true + - --providers.docker.endpoint=unix:///var/run/docker.sock + - --providers.docker.exposedbydefault=false + - --providers.docker.watch=true + # https + - --entrypoints.websecure.address=:443 + - --entrypoints.websecure.http.tls.certresolver=myresolver + # letsencrypt + - --certificatesresolvers.myresolver.acme.email=${TRAEFIK_ACME_EMAIL:-"cert@example.com"} + - --certificatesresolvers.myresolver.acme.storage=/config/acme.json + # uncomment to use testing certs + # - --certificatesresolvers.myresolver.acme.caserver=https://acme-staging-v02.api.letsencrypt.org/directory + - --certificatesresolvers.myresolver.acme.httpchallenge=true + - --certificatesresolvers.myresolver.acme.httpchallenge.entrypoint=web + restart: "unless-stopped" + networks: + - pecan + security_opt: + - no-new-privileges:true + ports: + - "${TRAEFIK_HTTP_PORT-80}:80" + - "${TRAEFIK_HTTPS_PORT-443}:443" + volumes: + - "traefik:/config" + - "/var/run/docker.sock:/var/run/docker.sock:ro" + + # ---------------------------------------------------------------------- + # Job management system. Jobs are distributed through the message + # system. PEcAn uses this to distribute the work/load across multiple + # containers. + # ---------------------------------------------------------------------- + + # rabbitmq to connect to extractors + rabbitmq: + image: rabbitmq:3.8-management + restart: unless-stopped + networks: + - pecan + environment: + - RABBITMQ_DEFAULT_USER=${RABBITMQ_DEFAULT_USER:-guest} + - RABBITMQ_DEFAULT_PASS=${RABBITMQ_DEFAULT_PASS:-guest} + volumes: + - rabbitmq:/var/lib/rabbitmq + + # ---------------------------------------------------------------------- + # Database to hold the data from PEcAn and BETY. 
+ # ---------------------------------------------------------------------- + + # postgresql + postgis to hold all the data + postgres: + image: mdillon/postgis:9.5 + restart: unless-stopped + networks: + - pecan + volumes: + - postgres:/var/lib/postgresql/data + + # ---------------------------------------------------------------------- + # BETY rails frontend to the database + # ---------------------------------------------------------------------- + bety: + image: pecan/bety:${BETY_VERSION:-latest} + restart: unless-stopped + networks: + - pecan + environment: + - UNICORN_WORKER_PROCESSES=1 + - SECRET_KEY_BASE=${BETY_SECRET_KEY:-notasecret} + - RAILS_RELATIVE_URL_ROOT=/bety + - LOCAL_SERVER=${BETY_LOCAL_SERVER:-99} + volumes: + - bety:/home/bety/log + depends_on: + - postgres + labels: + - "traefik.enable=true" + - "traefik.http.services.bety.loadbalancer.server.port=8000" + - "traefik.http.routers.bety.rule=Host(`${TRAEFIK_HOST:-pecan.localhost}`) && PathPrefix(`/bety/`)" + + # ---------------------------------------------------------------------- + # PEcAn application + # ---------------------------------------------------------------------- + + # PEcAn documentation as well as PEcAn home page + docs: + image: pecan/docs:${PECAN_VERSION:-latest} + restart: unless-stopped + networks: + - pecan + labels: + - "traefik.enable=true" + - "traefik.http.services.docs.loadbalancer.server.port=80" + - "traefik.http.routers.docs.rule=Host(`${TRAEFIK_HOST:-pecan.localhost}`) && PathPrefix(`/`)" + + # PEcAn web front end, this is just the PHP code + pecan: + user: "${UID:-1001}:${GID:-1001}" + image: pecan/web:${PECAN_VERSION:-latest} + restart: unless-stopped + networks: + - pecan + environment: + - RABBITMQ_URI=${RABBITMQ_URI:-amqp://guest:guest@rabbitmq/%2F} + - FQDN=${PECAN_FQDN:-docker} + - NAME=${PECAN_NAME:-docker} + - SECRET_KEY_BASE=${BETY_SECRET_KEY:-thisisnotasecret} + depends_on: + - postgres + - rabbitmq + labels: + - "traefik.enable=true" + - "traefik.http.services.pecan.loadbalancer.server.port=8080" + - "traefik.http.routers.pecan.rule=Host(`${TRAEFIK_HOST:-pecan.localhost}`) && PathPrefix(`/pecan/`)" + volumes: + - pecan:/data + - pecan:/var/www/html/pecan/data + + # PEcAn model monitor + monitor: + user: "${UID:-1001}:${GID:-1001}" + image: pecan/monitor:${PECAN_VERSION:-latest} + restart: unless-stopped + networks: + - pecan + environment: + - RABBITMQ_URI=${RABBITMQ_URI:-amqp://guest:guest@rabbitmq/%2F} + - FQDN=${PECAN_FQDN:-docker} + depends_on: + - rabbitmq + labels: + - "traefik.enable=true" + - "traefik.http.routers.monitor.rule=Host(`${TRAEFIK_HOST:-pecan.localhost}`) && PathPrefix(`/monitor/`)" + - "traefik.http.routers.monitor.middlewares=monitor-stripprefix" + - "traefik.http.middlewares.monitor-stripprefix.stripprefix.prefixes=/monitor" + volumes: + - pecan:/data + + # PEcAn executor, executes jobs. 
Does not run the actual models
+  executor:
+    user: "${UID:-1001}:${GID:-1001}"
+    image: pecan/executor:${PECAN_VERSION:-latest}
+    restart: unless-stopped
+    networks:
+      - pecan
+    environment:
+      - RABBITMQ_URI=${RABBITMQ_URI:-amqp://guest:guest@rabbitmq/%2F}
+      - RABBITMQ_PREFIX=/
+      - RABBITMQ_PORT=15672
+      - FQDN=${PECAN_FQDN:-docker}
+    depends_on:
+      - postgres
+      - rabbitmq
+    volumes:
+      - pecan:/data
+
+  # ----------------------------------------------------------------------
+  # PEcAn models, list each model you want to run below
+  # ----------------------------------------------------------------------
+
+  # PEcAn basgra model runner
+  basgra:
+    user: "${UID:-1001}:${GID:-1001}"
+    image: pecan/model-basgra-basgra_n_v1.0:${PECAN_VERSION:-latest}
+    restart: unless-stopped
+    networks:
+      - pecan
+    environment:
+      - RABBITMQ_URI=${RABBITMQ_URI:-amqp://guest:guest@rabbitmq/%2F}
+    depends_on:
+      - rabbitmq
+    volumes:
+      - pecan:/data
+
+  # PEcAn sipnet model runner
+  sipnet:
+    user: "${UID:-1001}:${GID:-1001}"
+    image: pecan/model-sipnet-git:${PECAN_VERSION:-latest}
+    restart: unless-stopped
+    networks:
+      - pecan
+    environment:
+      - RABBITMQ_URI=${RABBITMQ_URI:-amqp://guest:guest@rabbitmq/%2F}
+    depends_on:
+      - rabbitmq
+    volumes:
+      - pecan:/data
+
+  # PEcAn ED model runner
+  ed2:
+    user: "${UID:-1001}:${GID:-1001}"
+    image: pecan/model-ed2-2.2.0:${PECAN_VERSION:-latest}
+    restart: unless-stopped
+    networks:
+      - pecan
+    environment:
+      - RABBITMQ_URI=${RABBITMQ_URI:-amqp://guest:guest@rabbitmq/%2F}
+    depends_on:
+      - rabbitmq
+    volumes:
+      - pecan:/data
+
+  # PEcAn MAESPA model runner
+  maespa:
+    user: "${UID:-1001}:${GID:-1001}"
+    image: pecan/model-maespa-git:${PECAN_VERSION:-latest}
+    restart: unless-stopped
+    networks:
+      - pecan
+    environment:
+      - RABBITMQ_URI=${RABBITMQ_URI:-amqp://guest:guest@rabbitmq/%2F}
+    depends_on:
+      - rabbitmq
+    volumes:
+      - pecan:/data
+
+  # PEcAn BioCro model runner
+  biocro:
+    user: "${UID:-1001}:${GID:-1001}"
+    image: pecan/model-biocro-0.95:${PECAN_VERSION:-latest}
+    restart: unless-stopped
+    networks:
+      - pecan
+    environment:
+      - RABBITMQ_URI=${RABBITMQ_URI:-amqp://guest:guest@rabbitmq/%2F}
+    depends_on:
+      - rabbitmq
+    volumes:
+      - pecan:/data
+
+  # ----------------------------------------------------------------------
+  # PEcAn API
+  # ----------------------------------------------------------------------
+  api:
+    user: "${UID:-1001}:${GID:-1001}"
+    image: pecan/api:${PECAN_VERSION:-latest}
+    restart: unless-stopped
+    networks:
+      - pecan
+    environment:
+      - PGHOST=${PGHOST:-postgres}
+      - HOST_ONLY=${HOST_ONLY:-FALSE}
+      - AUTH_REQ=${AUTH_REQ:-FALSE}
+      - RABBITMQ_URI=${RABBITMQ_URI:-amqp://guest:guest@rabbitmq/%2F}
+      - DATA_DIR=${DATA_DIR:-/data/}
+      - DBFILES_DIR=${DBFILES_DIR:-/data/dbfiles/}
+      - SECRET_KEY_BASE=${BETY_SECRET_KEY:-thisisnotasecret}
+    labels:
+      - "traefik.enable=true"
+      - "traefik.http.routers.api.rule=Host(`${TRAEFIK_HOST:-pecan.localhost}`) && PathPrefix(`/api/`)"
+      - "traefik.http.services.api.loadbalancer.server.port=8000"
+    depends_on:
+      - postgres
+    volumes:
+      - pecan:/data/
+
+# ----------------------------------------------------------------------
+# Name of network to be used by all containers
+# ----------------------------------------------------------------------
+networks:
+  pecan:
+
+# ----------------------------------------------------------------------
+# Volumes used by the PEcAn stack. These volumes are used to make sure
+# we have persistent data.
You can add the commented section to your
+# docker-compose.override.yml to have the docker volumes placed at a
+# specific location.
+# ----------------------------------------------------------------------
+volumes:
+  traefik:
+  postgres:
+  bety:
+  rabbitmq:
+  pecan:
+  rstudio:
+
diff --git a/docker-compose.yml b/docker-compose.yml
index 180c5653834..4a2adc17804 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,9 +1,8 @@
-version: "3.2"
-
 services:

   # webserver to handle all traffic. This can use let's encrypt to generate a SSL cert.
   traefik:
+    hostname: traefik
     image: "traefik:v2.9"
     command:
       - --log.level=INFO
@@ -40,6 +39,7 @@ services:

   # rabbitmq to connect to extractors
   rabbitmq:
+    hostname: rabbitmq
     image: rabbitmq:3.8-management
     restart: unless-stopped
     networks:
@@ -54,6 +54,11 @@ services:
       - "traefik.http.routers.rabbitmq.rule=Host(`rabbitmq.pecan.localhost`)"
     volumes:
       - rabbitmq:/var/lib/rabbitmq
+    healthcheck:
+      test: rabbitmqctl ping
+      interval: 10s
+      timeout: 5s
+      retries: 5

   # ----------------------------------------------------------------------
   # Database to hold the data from PEcAn and BETY.
@@ -61,17 +66,24 @@ services:

   # postgresql + postgis to hold all the data
   postgres:
+    hostname: postgres
     image: mdillon/postgis:9.5
     restart: unless-stopped
     networks:
       - pecan
     volumes:
       - postgres:/var/lib/postgresql/data
+    healthcheck:
+      test: pg_isready -U postgres
+      interval: 10s
+      timeout: 5s
+      retries: 5

   # ----------------------------------------------------------------------
   # BETY rails frontend to the database
   # ----------------------------------------------------------------------
   bety:
+    hostname: bety
     image: pecan/bety:${BETY_VERSION:-latest}
     restart: unless-stopped
     networks:
@@ -81,25 +93,36 @@ services:
       - SECRET_KEY_BASE=${BETY_SECRET_KEY:-notasecret}
       - RAILS_RELATIVE_URL_ROOT=/bety
       - LOCAL_SERVER=${BETY_LOCAL_SERVER:-99}
+    volumes:
+      - bety:/home/bety/log
     depends_on:
-      - postgres
+      postgres:
+        condition: service_healthy
     labels:
       - "traefik.enable=true"
       - "traefik.http.services.bety.loadbalancer.server.port=8000"
       - "traefik.http.routers.bety.rule=Host(`${TRAEFIK_HOST:-pecan.localhost}`) && PathPrefix(`/bety/`)"
+    healthcheck:
+      test: "curl --silent --fail http://localhost:8000/$${RAILS_RELATIVE_URL_ROOT} > /dev/null || exit 1"
+      interval: 10s
+      timeout: 5s
+      retries: 5

   # ----------------------------------------------------------------------
   # RStudio
   # ----------------------------------------------------------------------
   rstudio:
+    hostname: rstudio
     image: pecan/base:${PECAN_VERSION:-latest}
     command: /work/rstudio.sh
     restart: unless-stopped
     networks:
       - pecan
     depends_on:
-      - rabbitmq
-      - postgres
+      postgres:
+        condition: service_healthy
+      rabbitmq:
+        condition: service_healthy
     environment:
       - KEEP_ENV=RABBITMQ_URI RABBITMQ_PREFIX RABBITMQ_PORT FQDN NAME
       - RABBITMQ_URI=${RABBITMQ_URI:-amqp://guest:guest@rabbitmq/%2F}
@@ -133,6 +156,7 @@ services:

   # PEcAn documentation as well as PEcAn home page
   docs:
+    hostname: docs
     image: pecan/docs:${PECAN_VERSION:-latest}
     restart: unless-stopped
     networks:
@@ -141,9 +165,15 @@
       - "traefik.enable=true"
       - "traefik.http.services.docs.loadbalancer.server.port=80"
       - "traefik.http.routers.docs.rule=Host(`${TRAEFIK_HOST:-pecan.localhost}`) && PathPrefix(`/`)"
+    healthcheck:
+      test: "curl --silent --fail http://localhost/ > /dev/null || exit 1"
+      interval: 10s
+      timeout: 5s
+      retries: 5

   # PEcAn web front end, this is just the PHP code
   pecan:
+    hostname: pecan-web
     user: "${UID:-1001}:${GID:-1001}"
     image:
pecan/web:${PECAN_VERSION:-latest} restart: unless-stopped @@ -155,8 +185,10 @@ services: - NAME=${PECAN_NAME:-docker} - SECRET_KEY_BASE=${BETY_SECRET_KEY:-thisisnotasecret} depends_on: - - postgres - - rabbitmq + postgres: + condition: service_healthy + rabbitmq: + condition: service_healthy labels: - "traefik.enable=true" - "traefik.http.services.pecan.loadbalancer.server.port=8080" @@ -164,9 +196,15 @@ services: volumes: - pecan:/data - pecan:/var/www/html/pecan/data + healthcheck: + test: "curl --silent --fail http://localhost:8080/pecan > /dev/null || exit 1" + interval: 10s + timeout: 5s + retries: 5 # PEcAn model monitor monitor: + hostname: monitor user: "${UID:-1001}:${GID:-1001}" image: pecan/monitor:${PECAN_VERSION:-latest} restart: unless-stopped @@ -176,7 +214,10 @@ services: - RABBITMQ_URI=${RABBITMQ_URI:-amqp://guest:guest@rabbitmq/%2F} - FQDN=${PECAN_FQDN:-docker} depends_on: - - rabbitmq + postgres: + condition: service_healthy + rabbitmq: + condition: service_healthy labels: - "traefik.enable=true" - "traefik.http.routers.monitor.rule=Host(`${TRAEFIK_HOST:-pecan.localhost}`) && PathPrefix(`/monitor/`)" @@ -184,9 +225,15 @@ services: - "traefik.http.middlewares.monitor-stripprefix.stripprefix.prefixes=/monitor" volumes: - pecan:/data + healthcheck: + test: "curl --silent --fail http://localhost:9999 > /dev/null || exit 1" + interval: 10s + timeout: 5s + retries: 5 # PEcAn executor, executes jobs. Does not the actual models executor: + hostname: executor user: "${UID:-1001}:${GID:-1001}" image: pecan/executor:${PECAN_VERSION:-latest} restart: unless-stopped @@ -198,17 +245,35 @@ services: - RABBITMQ_PORT=15672 - FQDN=${PECAN_FQDN:-docker} depends_on: - - postgres - - rabbitmq + postgres: + condition: service_healthy + rabbitmq: + condition: service_healthy volumes: - pecan:/data # ---------------------------------------------------------------------- # PEcAn models, list each model you want to run below # ---------------------------------------------------------------------- + # PEcAn FATES model runner + fates: + hostname: fates + user: "${UID:-1001}:${GID:-1001}" + image: ghcr.io/noresmhub/ctsm-api:latest + restart: unless-stopped + networks: + - pecan + environment: + - RABBITMQ_URI=${RABBITMQ_URI:-amqp://guest:guest@rabbitmq/%2F} + depends_on: + rabbitmq: + condition: service_healthy + volumes: + - pecan:/data # PEcAn basgra model runner basgra: + hostname: basgra user: "${UID:-1001}:${GID:-1001}" image: pecan/model-basgra-basgra_n_v1.0:${PECAN_VERSION:-latest} restart: unless-stopped @@ -217,26 +282,30 @@ services: environment: - RABBITMQ_URI=${RABBITMQ_URI:-amqp://guest:guest@rabbitmq/%2F} depends_on: - - rabbitmq + rabbitmq: + condition: service_healthy volumes: - pecan:/data # PEcAn sipnet model runner sipnet: + hostname: sipnet-git user: "${UID:-1001}:${GID:-1001}" - image: pecan/model-sipnet-r136:${PECAN_VERSION:-latest} + image: pecan/model-sipnet-git:${PECAN_VERSION:-latest} restart: unless-stopped networks: - pecan environment: - RABBITMQ_URI=${RABBITMQ_URI:-amqp://guest:guest@rabbitmq/%2F} depends_on: - - rabbitmq + rabbitmq: + condition: service_healthy volumes: - pecan:/data # PEcAn ED model runner ed2: + hostname: ed2-2_2_0 user: "${UID:-1001}:${GID:-1001}" image: pecan/model-ed2-2.2.0:${PECAN_VERSION:-latest} restart: unless-stopped @@ -245,12 +314,14 @@ services: environment: - RABBITMQ_URI=${RABBITMQ_URI:-amqp://guest:guest@rabbitmq/%2F} depends_on: - - rabbitmq + rabbitmq: + condition: service_healthy volumes: - pecan:/data # PEcAn MAESPA model runner 
maespa: + hostname: maespa-git user: "${UID:-1001}:${GID:-1001}" image: pecan/model-maespa-git:${PECAN_VERSION:-latest} restart: unless-stopped @@ -259,12 +330,14 @@ services: environment: - RABBITMQ_URI=${RABBITMQ_URI:-amqp://guest:guest@rabbitmq/%2F} depends_on: - - rabbitmq + rabbitmq: + condition: service_healthy volumes: - pecan:/data # PEcAn BioCro model runner biocro: + hostname: biocro-0_95 user: "${UID:-1001}:${GID:-1001}" image: pecan/model-biocro-0.95:${PECAN_VERSION:-latest} restart: unless-stopped @@ -273,7 +346,8 @@ services: environment: - RABBITMQ_URI=${RABBITMQ_URI:-amqp://guest:guest@rabbitmq/%2F} depends_on: - - rabbitmq + rabbitmq: + condition: service_healthy volumes: - pecan:/data @@ -282,24 +356,33 @@ services: # ---------------------------------------------------------------------- # PEcAn DB Sync visualization dbsync: + hostname: dbsync image: pecan/shiny-dbsync:${PECAN_VERSION:-latest} restart: unless-stopped networks: - pecan depends_on: - - postgres + postgres: + condition: service_healthy labels: - "traefik.enable=true" - "traefik.http.routers.dbsync.rule=Host(`${TRAEFIK_HOST:-pecan.localhost}`) && PathPrefix(`/dbsync/`)" - "traefik.http.routers.dbsync.middlewares=dbsync-stripprefix" - "traefik.http.middlewares.dbsync-stripprefix.stripprefix.prefixes=/monitor" + healthcheck: + test: "curl --silent --fail http://localhost:3838 > /dev/null || exit 1" + interval: 10s + timeout: 5s + retries: 5 # ---------------------------------------------------------------------- # PEcAn API # ---------------------------------------------------------------------- api: - image: pecan/api:${PECAN_VERSION:-latest} + hostname: api user: "${UID:-1001}:${GID:-1001}" + image: pecan/api:${PECAN_VERSION:-latest} + restart: unless-stopped networks: - pecan environment: @@ -315,9 +398,15 @@ services: - "traefik.http.routers.api.rule=Host(`${TRAEFIK_HOST:-pecan.localhost}`) && PathPrefix(`/api/`)" - "traefik.http.services.api.loadbalancer.server.port=8000" depends_on: - - postgres + postgres: + condition: service_healthy volumes: - pecan:/data/ + healthcheck: + test: "curl --silent --fail http://localhost:8000/api/ping > /dev/null || exit 1" + interval: 10s + timeout: 5s + retries: 5 # ---------------------------------------------------------------------- # Name of network to be used by all containers @@ -334,6 +423,7 @@ networks: volumes: traefik: postgres: + bety: rabbitmq: pecan: rstudio: diff --git a/docker.sh b/docker.sh index 16d87b618e8..4a2e3353992 100755 --- a/docker.sh +++ b/docker.sh @@ -26,7 +26,7 @@ PECAN_GIT_DATE="$(git log --pretty=format:%ad -1)" VERSION=${VERSION:-"$(awk '/Version:/ { print $2 }' base/all/DESCRIPTION)"} # check for branch and set IMAGE_VERSION -if [ "${PECAN_GIT_BRANCH}" == "master" ]; then +if [ "${PECAN_GIT_BRANCH}" == "main" ]; then IMAGE_VERSION=${IMAGE_VERSION:-"latest"} elif [ "${PECAN_GIT_BRANCH}" == "develop" ]; then IMAGE_VERSION=${IMAGE_VERSION:-"develop"} @@ -49,7 +49,7 @@ $0 [-dfhn] [-i ] [-r " echo "# ----------------------------------------------------------------------" # not building dependencies image, following command will build this @@ -125,9 +128,20 @@ if [ "${DEPEND}" == "build" ]; then --build-arg R_VERSION=${R_VERSION} ${GITHUB_WORKFLOW_ARG} \ --tag pecan/depends:${IMAGE_VERSION} \ docker/depends +elif [ "${UPDATE_DEPENDS_FROM_TAG}" != "" ]; then + echo "# Attempting to update from existing pecan/depends:${UPDATE_DEPENDS_FROM_TAG}." + echo "# This is experimental. 
if it fails, please instead use" + echo "# 'DEPEND=build' to start from a known clean state." + ${DEBUG} docker build \ + --pull \ + --secret id=github_token,env=GITHUB_PAT \ + --build-arg FROM_IMAGE="pecan/depends" \ + --build-arg R_VERSION=${UPDATE_DEPENDS_FROM_TAG} ${GITHUB_WORKFLOW_ARG} \ + --tag pecan/depends:${IMAGE_VERSION} \ + docker/depends else if [ "$( docker image ls -q pecan/depends:${IMAGE_VERSION} )" == "" ]; then - if [ "${PECAN_GIT_BRANCH}" != "master" ]; then + if [ "${PECAN_GIT_BRANCH}" != "main" ]; then ${DEBUG} docker pull pecan/depends:R${R_VERSION} if [ "${IMAGE_VERSION}" != "develop" ]; then ${DEBUG} docker tag pecan/depends:R${R_VERSION} pecan/depends:${IMAGE_VERSION} diff --git a/docker/base/Dockerfile b/docker/base/Dockerfile index 8ec9f3347ef..abbffd47c9b 100644 --- a/docker/base/Dockerfile +++ b/docker/base/Dockerfile @@ -2,7 +2,6 @@ ARG IMAGE_VERSION="latest" ARG FROM_IMAGE="depends" FROM pecan/${FROM_IMAGE}:${IMAGE_VERSION} -MAINTAINER Rob Kooper # ---------------------------------------------------------------------- # PEcAn version information @@ -26,9 +25,7 @@ COPY models /pecan/models/ # install all PEcAn packages # `make clean` is to remove artifacts copied in from host system # (e.g. basgra.so) -RUN --mount=type=secret,id=github_token \ - export GITHUB_PAT=`cat /run/secrets/github_token` \ - && cd /pecan \ +RUN cd /pecan \ && make clean \ && make \ && rm -rf /tmp/downloaded_packages @@ -38,7 +35,7 @@ WORKDIR /work COPY web/workflow.R docker/base/rstudio.sh /work/ # COMMAND TO RUN -CMD Rscript --vanilla workflow.R | tee workflow.Rout +CMD ["bash", "-c", "Rscript --vanilla workflow.R | tee workflow.Rout"] # variables to store in docker image ENV PECAN_VERSION=${PECAN_VERSION} \ diff --git a/docker/data/Dockerfile b/docker/data/Dockerfile index 2ac60cd7e83..1302d201acf 100644 --- a/docker/data/Dockerfile +++ b/docker/data/Dockerfile @@ -1,5 +1,4 @@ FROM alpine -MAINTAINER Rob Kooper # name to use in the machines table FQDN when registering the data files ENV FQDN="" \ @@ -19,4 +18,4 @@ RUN apk --no-cache add bash curl rsync postgresql-client unzip \ && curl -s -o create_met_driver.tar.gz http://isda.ncsa.illinois.edu/~kooper/EBI/create_met_driver.tar.gz COPY add-data.sh add.util.sh /work/ -CMD bash /work/add-data.sh +CMD ["bash", "/work/add-data.sh"] diff --git a/docker/depends/Dockerfile b/docker/depends/Dockerfile index 02905bbf433..b784bcd3bfa 100644 --- a/docker/depends/Dockerfile +++ b/docker/depends/Dockerfile @@ -1,20 +1,10 @@ -ARG R_VERSION="4.0.2" +ARG R_VERSION="4.1" +ARG FROM_IMAGE="rocker/tidyverse" # ---------------------------------------------------------------------- # PECAN FOR MODEL BASE IMAGE # ---------------------------------------------------------------------- -FROM rocker/tidyverse:${R_VERSION} -MAINTAINER Rob Kooper - -# ---------------------------------------------------------------------- -# UPDATE GIT -# This is needed for stretch and github actions -# ---------------------------------------------------------------------- -RUN if [ "$(lsb_release -s -c)" = "stretch" ]; then \ - echo 'deb http://deb.debian.org/debian stretch-backports main' >> /etc/apt/sources.list \ - && apt-get update \ - && apt-get -t stretch-backports upgrade -y git \ - ; fi +FROM ${FROM_IMAGE}:${R_VERSION} # ---------------------------------------------------------------------- # INSTALL BINARY/LIBRARY DEPENDENCIES @@ -26,6 +16,7 @@ RUN apt-get update \ jags \ time \ openssh-client \ + patch \ rsync \ libgdal-dev \ libglpk-dev \ @@ -39,11 +30,8 @@ RUN 
apt-get update \ # ---------------------------------------------------------------------- # INSTALL DEPENDENCIES # ---------------------------------------------------------------------- -COPY pecan.depends.R / -RUN --mount=type=secret,id=github_token \ - export GITHUB_PAT=`cat /run/secrets/github_token` \ - && Rscript -e "install.packages(c('devtools'))" \ - && Rscript -e "devtools::install_version('roxygen2', '7.2.3', repos = 'cran.r-project.org')" \ +COPY pecan.depends.R pecan_package_dependencies.csv pecan_deps_from_github.txt / +RUN Rscript -e "install.packages(c('desc', 'remotes'))" \ && R_LIBS_USER='/usr/local/lib/R/site-library' Rscript /pecan.depends.R \ && rm -rf /tmp/* diff --git a/docker/depends/pecan.depends.R b/docker/depends/pecan.depends.R index 977faf4197e..0f3057d896d 100644 --- a/docker/depends/pecan.depends.R +++ b/docker/depends/pecan.depends.R @@ -1,152 +1,99 @@ #!/usr/bin/env Rscript -# autogenerated do not edit -# use scripts/generate_dependencies.R # Don't use X11 for rgl Sys.setenv(RGL_USE_NULL = TRUE) rlib <- Sys.getenv('R_LIBS_USER', '/usr/local/lib/R/site-library') Sys.setenv(RLIB = rlib) -# install remotes first in case packages are references in dependencies -remotes::install_github(c( -'araiho/linkages_package', -'chuhousen/amerifluxr', -'ebimodeling/biocro@0.951', -'MikkoPeltoniemi/Rpreles', -'r-lib/testthat@v3.1.6', -'r-lib/vdiffr@v1.0.4', -'ropensci/geonames', -'ropensci/nneo', -'rstudio/rmarkdown@v2.20' -), lib = rlib) -# install all packages (depends, imports, suggests) -wanted <- c( -'abind', -'amerifluxr', -'assertthat', -'BayesianTools', -'BioCro', -'bit64', -'BrownDog', -'coda', -'corrplot', -'curl', -'data.table', -'dataone', -'datapack', -'DBI', -'dbplyr', -'devtools', -'doParallel', -'dplR', -'dplyr', -'ellipse', -'emdbook', -'foreach', -'fs', -'furrr', -'future', -'geonames', -'getPass', -'ggmap', -'ggmcmc', -'ggplot2', -'ggrepel', -'glue', -'graphics', -'grDevices', -'grid', -'gridExtra', -'hdf5r', -'here', -'httr', -'IDPmisc', -'jsonlite', -'knitr', -'lattice', -'linkages', -'lqmm', -'lubridate', -'Maeswrap', -'magic', -'magrittr', -'maps', -'maptools', -'markdown', -'MASS', -'Matrix', -'mclust', -'MCMCpack', -'methods', -'mgcv', -'minpack.lm', -'mlegp', -'mockery', -'MODISTools', -'mvbutils', -'mvtnorm', -'ncdf4', -'neonstore', -'neonUtilities', -'nimble', -'nneo', -'optparse', -'parallel', -'plotrix', -'plyr', -'png', -'prodlim', -'progress', -'purrr', -'pwr', -'R.utils', -'randtoolbox', -'raster', -'rcrossref', -'REddyProc', -'redland', -'reshape', -'reshape2', -'reticulate', -'rgdal', -'rjags', -'rjson', -'rlang', -'rlist', -'rmarkdown', -'RPostgres', -'RPostgreSQL', -'Rpreles', -'RSQLite', -'sf', -'SimilarityMeasures', -'sirt', -'sp', -'stats', -'stringi', -'stringr', -'swfscMisc', -'terra', -'testthat', -'tibble', -'tictoc', -'tidyr', -'tidyselect', -'tidyverse', -'tools', -'traits', -'TruncatedNormal', -'truncnorm', -'units', -'urltools', -'utils', -'vdiffr', -'withr', -'XML', -'xtable', -'xts', -'zoo' -) -missing <- wanted[!(wanted %in% installed.packages()[,'Package'])] +# Find the latest of several possible minimum package versions +condense_version_requirements <- function(specs) { + if (all(specs == "*")) { + # any version is acceptable + return("*") + } + specs <- unique(specs[specs != "*"]) + versions <- package_version( + gsub("[^[:digit:].-]+", "", specs)) + + if ((length(unique(versions)) > 1) && any(!grepl(">", specs))) { + # Can't assume the latest version works for all, so give up. 
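+    # For example, specs of c(">= 1.4", ">= 2.0") condense safely to ">= 2.0",
+    # but for something like c(">= 2.0", "<= 1.3") the latest stated version
+    # would violate the "<=" spec, so there is no safe single choice.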
+    # We *could* write more to handle this case if needed, but it seems very rare:
+    # available.packages() shows `<=` or `==` deps in just 4 of 20297 CRAN packages
+    #
+    # Since the package name wasn't passed in here, we unhelpfully print *just*
+    # the offending versions and send the user back to the CSV for details.
+    stop(
+      "Found multiple version requirements (",
+      paste(dQuote(specs), collapse = ", "), ") for the same dependency, ",
+      "and not all are minimum versions (e.g. `>= x.y.z`). ",
+      "Exact (`==`) or maximum (`<=`) version requirements are only allowed ",
+      "if all PEcAn packages declare the same version. ",
+      "Sorry, this function doesn't know which dependency caused this. ",
+      "To find it, search for these version strings in ",
+      "'pecan_package_dependencies.csv'.")
+  }
+  specs[versions == max(versions)]
+}
+
+# Install pkg at version or newer,
+# upgrading dependencies only if needed to satisfy stated version requirements
+ensure_version <- function(pkg, version) {
+  vers <- gsub('[^[:digit:].-]+', '', version)
+  cmp <- get(gsub('[^<>=]+', '', version))
+  ok <- requireNamespace(pkg, quietly = TRUE) &&
+    cmp(packageVersion(pkg), vers)
+  if (!ok) {
+    # install pkg and any *missing* dependencies
+    remotes::install_version(pkg, version, dependencies = TRUE, upgrade = FALSE)
+    # Now check for installed but *incompatible* dependencies
+    # (install_version doesn't resolve these when upgrade=FALSE)
+    dep <- desc::desc_get_deps(system.file("DESCRIPTION", package = pkg))
+    dep <- dep[
+      dep$type %in% c("Depends", "Imports", "LinkingTo")
+      & dep$version != "*"
+      & dep$package != "R",]
+    invisible(Map(ensure_version, dep$package, dep$version))
+  }
+
+}
+
+# Read list of dependencies.
+# NOTE: These files are autogenerated --
+# use scripts/generate_dependencies.R to edit them.
+all_deps <- read.csv("pecan_package_dependencies.csv") |>
+  subset(!is_pecan)
+gh_repos <- readLines("pecan_deps_from_github.txt")
+
+
+# install remotes first, so they're present when checked as dependencies
+# NOTE: script doesn't know what package a given repo provides,
+# it just installs whatever it finds at each address.
+remotes::install_github(gh_repos, lib = rlib)
+
+
+# For deps used by multiple packages, find a version that works for all
+uniq_deps <- tapply(
+  all_deps$version,
+  INDEX = all_deps$package,
+  FUN = condense_version_requirements)
+
+
+# Install deps that declare no version restriction.
+# We'll install these with one plain old `install.packages()` call.
+unversioned <- names(uniq_deps[uniq_deps == "*"])
+missing <- unversioned[!(unversioned %in% installed.packages()[,'Package'])]
 install.packages(missing, lib = rlib)
+
+
+# Install deps that need a set minimum version.
+# We'll install these with `remotes::install_version`,
+# directing it to look outside our fixed-date CRAN snapshot if
+# it can't fill the version req from snapshot versions.
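+# For illustration, a date-pinned repos entry such as
+#   https://packagemanager.posit.co/cran/2024-01-10
+# would pick up the fallback
+#   https://packagemanager.posit.co/cran/latest
+# from the sub() call below. (Example URL only; any date-stamped scheme works.)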
+# (Assumes our CRAN uses the same URL scheme as Posit package manager) +options(repos = c( + getOption('repos'), + sub(r'(\d{4}-\d{2}-\d{2})', 'latest', getOption('repos')) +)) +versioned <- uniq_deps[uniq_deps != "*"] +invisible(Map(ensure_version, names(versioned), versioned)) \ No newline at end of file diff --git a/docker/depends/pecan_deps_from_github.txt b/docker/depends/pecan_deps_from_github.txt new file mode 100644 index 00000000000..043892f37d7 --- /dev/null +++ b/docker/depends/pecan_deps_from_github.txt @@ -0,0 +1,11 @@ +adokter/suntools +araiho/linkages_package +chuhousen/amerifluxr +ebimodeling/biocro@0.951 +MikkoPeltoniemi/Rpreles +RemkoDuursma/Maeswrap +ropensci/geonames +ropensci/nneo +SticsRPacks/SticsOnR +SticsRPacks/SticsRFiles +VangiElia/GEDI4R diff --git a/docker/depends/pecan_package_dependencies.csv b/docker/depends/pecan_package_dependencies.csv new file mode 100644 index 00000000000..1e8d04c40a9 --- /dev/null +++ b/docker/depends/pecan_package_dependencies.csv @@ -0,0 +1,694 @@ +"package","version","needed_by_dir","type","is_pecan" +"abind","*","modules/assim.batch","Imports",FALSE +"abind",">= 1.4.5","base/utils","Imports",FALSE +"abind",">= 1.4.5","models/ed","Imports",FALSE +"abind",">= 1.4.5","modules/data.atmosphere","Imports",FALSE +"amerifluxr","*","modules/data.atmosphere","Imports",FALSE +"arrow","*","modules/data.atmosphere","Imports",FALSE +"assertthat","*","models/ed","Imports",FALSE +"assertthat","*","modules/data.atmosphere","Imports",FALSE +"BayesianTools","*","modules/assim.batch","Imports",FALSE +"BayesianTools","*","modules/rtm","Imports",FALSE +"BioCro","*","models/biocro","Suggests",FALSE +"bit64","*","base/db","Suggests",FALSE +"coda","*","models/maespa","Suggests",FALSE +"coda","*","models/sipnet","Suggests",FALSE +"coda","*","modules/assim.sequential","Imports",FALSE +"coda","*","modules/data.land","Imports",FALSE +"coda","*","modules/rtm","Imports",FALSE +"coda",">= 0.18","base/utils","Suggests",FALSE +"coda",">= 0.18","modules/allometry","Imports",FALSE +"coda",">= 0.18","modules/assim.batch","Imports",FALSE +"coda",">= 0.18","modules/emulator","Imports",FALSE +"coda",">= 0.18","modules/meta.analysis","Imports",FALSE +"coda",">= 0.18","modules/photosynthesis","Imports",FALSE +"corrplot","*","modules/assim.sequential","Suggests",FALSE +"curl","*","base/utils","Imports",FALSE +"curl","*","modules/data.atmosphere","Imports",FALSE +"curl","*","modules/data.land","Imports",FALSE +"curl","*","modules/data.remote","Imports",FALSE +"data.table","*","base/db","Suggests",FALSE +"data.table","*","base/utils","Suggests",FALSE +"data.table","*","base/visualization","Imports",FALSE +"data.table","*","models/biocro","Imports",FALSE +"data.table","*","modules/data.remote","Suggests",FALSE +"dataone","*","modules/data.land","Suggests",FALSE +"datapack","*","modules/data.land","Suggests",FALSE +"DBI","*","base/db","Imports",FALSE +"DBI","*","modules/data.remote","Imports",FALSE +"dbplyr",">= 2.4.0","base/db","Imports",FALSE +"devtools","*","models/ed","Suggests",FALSE +"doParallel","*","modules/data.atmosphere","Suggests",FALSE +"doParallel","*","modules/data.remote","Imports",FALSE +"doSNOW","*","base/remote","Suggests",FALSE +"dplR","*","modules/data.land","Imports",FALSE +"dplyr","*","base/qaqc","Imports",FALSE +"dplyr","*","base/remote","Imports",FALSE +"dplyr","*","base/utils","Imports",FALSE +"dplyr","*","base/workflow","Imports",FALSE +"dplyr","*","models/biocro","Imports",FALSE +"dplyr","*","models/ed","Imports",FALSE 
+"dplyr","*","models/ldndc","Imports",FALSE +"dplyr","*","models/sipnet","Imports",FALSE +"dplyr","*","models/stics","Imports",FALSE +"dplyr","*","modules/assim.sequential","Imports",FALSE +"dplyr","*","modules/benchmark","Imports",FALSE +"dplyr","*","modules/data.land","Imports",FALSE +"dplyr","*","modules/data.remote","Suggests",FALSE +"dplyr","*","modules/priors","Suggests",FALSE +"dplyr","*","modules/uncertainty","Imports",FALSE +"dplyr",">= 0.8.1","modules/data.atmosphere","Imports",FALSE +"dplyr",">= 1.1.2","base/db","Imports",FALSE +"ellipse","*","modules/assim.batch","Imports",FALSE +"emdbook","*","modules/assim.sequential","Suggests",FALSE +"exactextractr","*","modules/assim.sequential","Suggests",FALSE +"foreach","*","base/remote","Imports",FALSE +"foreach","*","modules/data.atmosphere","Suggests",FALSE +"foreach","*","modules/data.remote","Imports",FALSE +"fs","*","base/db","Imports",FALSE +"fs","*","modules/data.land","Imports",FALSE +"furrr","*","base/remote","Imports",FALSE +"furrr","*","modules/assim.sequential","Imports",FALSE +"furrr","*","modules/data.atmosphere","Suggests",FALSE +"furrr","*","modules/data.land","Imports",FALSE +"furrr","*","modules/data.remote","Imports",FALSE +"future","*","modules/assim.sequential","Imports",FALSE +"future","*","modules/data.atmosphere","Suggests",FALSE +"future","*","modules/data.land","Imports",FALSE +"future","*","modules/data.remote","Imports",FALSE +"GEDI4R","*","modules/data.remote","Suggests",FALSE +"geonames","> 0.998","modules/data.atmosphere","Imports",FALSE +"getPass","*","base/remote","Suggests",FALSE +"getPass","*","modules/data.land","Suggests",FALSE +"getPass","*","modules/data.remote","Suggests",FALSE +"ggmcmc","*","modules/meta.analysis","Suggests",FALSE +"ggplot2","*","base/utils","Suggests",FALSE +"ggplot2","*","base/visualization","Imports",FALSE +"ggplot2","*","modules/assim.sequential","Imports",FALSE +"ggplot2","*","modules/benchmark","Imports",FALSE +"ggplot2","*","modules/data.atmosphere","Imports",FALSE +"ggplot2","*","modules/data.remote","Suggests",FALSE +"ggplot2","*","modules/meta.analysis","Suggests",FALSE +"ggplot2","*","modules/priors","Imports",FALSE +"ggplot2","*","modules/uncertainty","Imports",FALSE +"ggpubr","*","modules/assim.sequential","Suggests",FALSE +"ggrepel","*","modules/assim.sequential","Suggests",FALSE +"glue","*","base/db","Imports",FALSE +"glue","*","models/ed","Imports",FALSE +"glue","*","modules/assim.sequential","Suggests",FALSE +"glue","*","modules/data.atmosphere","Imports",FALSE +"glue","*","modules/data.land","Suggests",FALSE +"glue","*","modules/data.remote","Imports",FALSE +"graphics","*","base/qaqc","Imports",FALSE +"graphics","*","modules/allometry","Imports",FALSE +"graphics","*","modules/assim.batch","Imports",FALSE +"graphics","*","modules/photosynthesis","Imports",FALSE +"grDevices","*","modules/allometry","Imports",FALSE +"grDevices","*","modules/assim.batch","Imports",FALSE +"grDevices","*","modules/benchmark","Imports",FALSE +"grDevices","*","modules/data.remote","Suggests",FALSE +"grid","*","base/visualization","Suggests",FALSE +"gridExtra","*","modules/assim.sequential","Suggests",FALSE +"gridExtra","*","modules/benchmark","Imports",FALSE +"gridExtra","*","modules/uncertainty","Imports",FALSE +"hdf5r","*","models/ed","Imports",FALSE +"here","*","base/db","Suggests",FALSE +"httr","*","base/remote","Imports",FALSE +"httr","*","modules/data.atmosphere","Imports",FALSE +"httr","*","modules/data.land","Imports",FALSE +"httr","*","modules/data.remote","Suggests",FALSE 
+"IDPmisc","*","modules/assim.batch","Imports",FALSE +"jsonlite","*","base/remote","Imports",FALSE +"jsonlite","*","models/stics","Imports",FALSE +"jsonlite","*","modules/data.atmosphere","Imports",FALSE +"jsonlite","*","modules/data.remote","Suggests",FALSE +"keras3",">= 1.0.0","modules/assim.sequential","Suggests",FALSE +"knitr","*","base/visualization","Suggests",FALSE +"knitr","*","models/biocro","Suggests",FALSE +"knitr","*","models/ed","Suggests",FALSE +"knitr","*","models/maat","Suggests",FALSE +"knitr","*","modules/data.atmosphere","Suggests",FALSE +"knitr","*","modules/priors","Suggests",FALSE +"knitr",">= 1.42","base/db","Suggests",FALSE +"knitr",">= 1.42","base/qaqc","Suggests",FALSE +"knitr",">= 1.42","modules/allometry","Suggests",FALSE +"knitr",">= 1.42","modules/assim.batch","Suggests",FALSE +"knitr",">= 1.42","modules/meta.analysis","Suggests",FALSE +"knitr",">= 1.42","modules/photosynthesis","Suggests",FALSE +"knitr",">= 1.42","modules/rtm","Suggests",FALSE +"lattice","*","modules/meta.analysis","Imports",FALSE +"linkages","*","models/linkages","Suggests",FALSE +"lqmm","*","modules/assim.batch","Imports",FALSE +"lubridate","*","base/db","Imports",FALSE +"lubridate","*","models/basgra","Imports",FALSE +"lubridate","*","models/dvmdostem","Imports",FALSE +"lubridate","*","models/ed","Imports",FALSE +"lubridate","*","models/ldndc","Imports",FALSE +"lubridate","*","models/stics","Imports",FALSE +"lubridate","*","modules/data.land","Imports",FALSE +"lubridate","*","modules/data.remote","Suggests",FALSE +"lubridate",">= 1.6.0","base/settings","Imports",FALSE +"lubridate",">= 1.6.0","base/utils","Imports",FALSE +"lubridate",">= 1.6.0","models/dalec","Imports",FALSE +"lubridate",">= 1.6.0","models/fates","Imports",FALSE +"lubridate",">= 1.6.0","models/gday","Imports",FALSE +"lubridate",">= 1.6.0","models/jules","Imports",FALSE +"lubridate",">= 1.6.0","models/linkages","Imports",FALSE +"lubridate",">= 1.6.0","models/lpjguess","Imports",FALSE +"lubridate",">= 1.6.0","models/maat","Imports",FALSE +"lubridate",">= 1.6.0","models/maespa","Imports",FALSE +"lubridate",">= 1.6.0","models/preles","Imports",FALSE +"lubridate",">= 1.6.0","models/sipnet","Imports",FALSE +"lubridate",">= 1.6.0","modules/assim.batch","Imports",FALSE +"lubridate",">= 1.6.0","modules/assim.sequential","Imports",FALSE +"lubridate",">= 1.6.0","modules/benchmark","Imports",FALSE +"lubridate",">= 1.6.0","modules/data.atmosphere","Imports",FALSE +"lubridate",">= 1.6.0","modules/rtm","Imports",FALSE +"lubridate",">= 1.7.0","models/biocro","Imports",FALSE +"Maeswrap","*","models/maespa","Suggests",FALSE +"magic",">= 1.5.0","modules/assim.sequential","Suggests",FALSE +"magrittr","*","base/db","Imports",FALSE +"magrittr","*","base/utils","Imports",FALSE +"magrittr","*","models/ed","Imports",FALSE +"magrittr","*","modules/assim.sequential","Imports",FALSE +"magrittr","*","modules/benchmark","Imports",FALSE +"magrittr","*","modules/data.land","Imports",FALSE +"magrittr","*","modules/data.remote","Imports",FALSE +"markdown","*","modules/allometry","Suggests",FALSE +"markdown","*","modules/photosynthesis","Suggests",FALSE +"MASS","*","base/utils","Suggests",FALSE +"MASS","*","modules/assim.batch","Imports",FALSE +"MASS","*","modules/data.atmosphere","Imports",FALSE +"MASS","*","modules/meta.analysis","Imports",FALSE +"MASS","*","modules/priors","Imports",FALSE +"MASS","*","modules/rtm","Imports",FALSE +"Matrix","*","modules/assim.sequential","Imports",FALSE +"mclust","*","modules/rtm","Suggests",FALSE 
+"MCMCpack","*","modules/allometry","Imports",FALSE +"MCMCpack","*","modules/assim.batch","Imports",FALSE +"MCMCpack","*","modules/emulator","Imports",FALSE +"methods","*","base/db","Imports",FALSE +"methods","*","base/settings","Depends",FALSE +"methods","*","modules/allometry","Imports",FALSE +"methods","*","modules/assim.batch","Imports",FALSE +"methods","*","modules/assim.sequential","Suggests",FALSE +"methods","*","modules/emulator","Imports",FALSE +"mgcv","*","modules/data.atmosphere","Imports",FALSE +"minpack.lm","*","modules/rtm","Suggests",FALSE +"mlegp","*","modules/assim.batch","Imports",FALSE +"mockery","*","base/all","Suggests",FALSE +"mockery","*","base/qaqc","Suggests",FALSE +"mockery","*","base/remote","Suggests",FALSE +"mockery","*","base/settings","Suggests",FALSE +"mockery","*","base/utils","Suggests",FALSE +"mockery","*","base/visualization","Suggests",FALSE +"mockery","*","base/workflow","Suggests",FALSE +"mockery","*","modules/data.atmosphere","Suggests",FALSE +"mockery","*","modules/meta.analysis","Suggests",FALSE +"mockery",">= 0.3.0","models/biocro","Suggests",FALSE +"mockery",">= 0.4.3","base/db","Suggests",FALSE +"MODISTools",">= 1.1.0","modules/data.remote","Imports",FALSE +"mvbutils","*","base/qaqc","Suggests",FALSE +"mvtnorm","*","modules/allometry","Imports",FALSE +"mvtnorm","*","modules/assim.batch","Imports",FALSE +"mvtnorm","*","modules/assim.sequential","Imports",FALSE +"mvtnorm","*","modules/data.land","Imports",FALSE +"mvtnorm","*","modules/emulator","Imports",FALSE +"ncdf4","*","base/db","Imports",FALSE +"ncdf4","*","models/basgra","Imports",FALSE +"ncdf4","*","models/dvmdostem","Imports",FALSE +"ncdf4","*","models/ldndc","Imports",FALSE +"ncdf4","*","models/sibcasa","Imports",FALSE +"ncdf4","*","models/stics","Imports",FALSE +"ncdf4","*","modules/assim.sequential","Imports",FALSE +"ncdf4","*","modules/data.remote","Imports",FALSE +"ncdf4",">= 1.15","base/utils","Imports",FALSE +"ncdf4",">= 1.15","base/visualization","Imports",FALSE +"ncdf4",">= 1.15","models/biocro","Imports",FALSE +"ncdf4",">= 1.15","models/clm45","Imports",FALSE +"ncdf4",">= 1.15","models/dalec","Imports",FALSE +"ncdf4",">= 1.15","models/ed","Imports",FALSE +"ncdf4",">= 1.15","models/fates","Imports",FALSE +"ncdf4",">= 1.15","models/gday","Imports",FALSE +"ncdf4",">= 1.15","models/jules","Imports",FALSE +"ncdf4",">= 1.15","models/linkages","Imports",FALSE +"ncdf4",">= 1.15","models/lpjguess","Imports",FALSE +"ncdf4",">= 1.15","models/maat","Imports",FALSE +"ncdf4",">= 1.15","models/maespa","Imports",FALSE +"ncdf4",">= 1.15","models/preles","Imports",FALSE +"ncdf4",">= 1.15","models/sipnet","Imports",FALSE +"ncdf4",">= 1.15","modules/assim.batch","Imports",FALSE +"ncdf4",">= 1.15","modules/benchmark","Imports",FALSE +"ncdf4",">= 1.15","modules/data.atmosphere","Imports",FALSE +"ncdf4",">= 1.15","modules/data.land","Imports",FALSE +"neonstore","*","modules/data.land","Imports",FALSE +"neonUtilities","*","modules/data.land","Imports",FALSE +"nimble","*","modules/assim.sequential","Imports",FALSE +"nneo","*","modules/data.atmosphere","Imports",FALSE +"optparse","*","base/settings","Imports",FALSE +"parallel","*","modules/assim.batch","Imports",FALSE +"parallel","*","modules/data.atmosphere","Suggests",FALSE +"parallel","*","modules/data.remote","Imports",FALSE +"PEcAn.allometry","*","base/all","Suggests",TRUE +"PEcAn.assim.batch","*","base/all","Depends",TRUE +"PEcAn.assim.batch","*","modules/rtm","Imports",TRUE +"PEcAn.benchmark","*","base/all","Depends",TRUE 
+"PEcAn.benchmark","*","modules/assim.batch","Imports",TRUE +"PEcAn.benchmark","*","modules/assim.sequential","Suggests",TRUE +"PEcAn.benchmark","*","modules/data.land","Imports",TRUE +"PEcAn.BIOCRO","*","base/all","Suggests",TRUE +"PEcAn.BIOCRO","*","base/qaqc","Suggests",TRUE +"PEcAn.DALEC","*","base/all","Suggests",TRUE +"PEcAn.data.atmosphere","*","base/all","Depends",TRUE +"PEcAn.data.atmosphere","*","base/workflow","Imports",TRUE +"PEcAn.data.atmosphere","*","models/basgra","Imports",TRUE +"PEcAn.data.atmosphere","*","models/biocro","Imports",TRUE +"PEcAn.data.atmosphere","*","models/ed","Imports",TRUE +"PEcAn.data.atmosphere","*","models/jules","Imports",TRUE +"PEcAn.data.atmosphere","*","models/ldndc","Imports",TRUE +"PEcAn.data.atmosphere","*","models/maat","Imports",TRUE +"PEcAn.data.atmosphere","*","models/maespa","Imports",TRUE +"PEcAn.data.atmosphere","*","models/preles","Imports",TRUE +"PEcAn.data.atmosphere","*","models/sipnet","Imports",TRUE +"PEcAn.data.land","*","base/all","Depends",TRUE +"PEcAn.data.land","*","base/workflow","Imports",TRUE +"PEcAn.data.land","*","models/biocro","Imports",TRUE +"PEcAn.data.land","*","models/ed","Imports",TRUE +"PEcAn.data.land","*","models/ldndc","Imports",TRUE +"PEcAn.data.land","*","models/linkages","Imports",TRUE +"PEcAn.data.land","*","models/sipnet","Imports",TRUE +"PEcAn.data.land","*","modules/assim.sequential","Suggests",TRUE +"PEcAn.data.land","*","modules/benchmark","Suggests",TRUE +"PEcAn.data.remote","*","base/all","Depends",TRUE +"PEcAn.data.remote","*","modules/assim.sequential","Suggests",TRUE +"PEcAn.DB","*","base/all","Depends",TRUE +"PEcAn.DB","*","base/qaqc","Imports",TRUE +"PEcAn.DB","*","base/settings","Imports",TRUE +"PEcAn.DB","*","base/workflow","Imports",TRUE +"PEcAn.DB","*","models/biocro","Suggests",TRUE +"PEcAn.DB","*","models/linkages","Imports",TRUE +"PEcAn.DB","*","models/template","Imports",TRUE +"PEcAn.DB","*","modules/allometry","Imports",TRUE +"PEcAn.DB","*","modules/assim.batch","Imports",TRUE +"PEcAn.DB","*","modules/assim.sequential","Imports",TRUE +"PEcAn.DB","*","modules/benchmark","Imports",TRUE +"PEcAn.DB","*","modules/data.atmosphere","Imports",TRUE +"PEcAn.DB","*","modules/data.land","Imports",TRUE +"PEcAn.DB","*","modules/data.remote","Imports",TRUE +"PEcAn.DB","*","modules/meta.analysis","Imports",TRUE +"PEcAn.DB","*","modules/uncertainty","Imports",TRUE +"PEcAn.ED2","*","base/all","Suggests",TRUE +"PEcAn.ED2","*","base/qaqc","Suggests",TRUE +"PEcAn.ED2","*","modules/rtm","Suggests",TRUE +"PEcAn.emulator","*","base/all","Depends",TRUE +"PEcAn.emulator","*","modules/assim.batch","Imports",TRUE +"PEcAn.emulator","*","modules/uncertainty","Imports",TRUE +"PEcAn.LINKAGES","*","base/all","Suggests",TRUE +"PEcAn.logger","*","base/all","Depends",TRUE +"PEcAn.logger","*","base/db","Imports",TRUE +"PEcAn.logger","*","base/qaqc","Imports",TRUE +"PEcAn.logger","*","base/remote","Imports",TRUE +"PEcAn.logger","*","base/settings","Imports",TRUE +"PEcAn.logger","*","base/utils","Imports",TRUE +"PEcAn.logger","*","base/visualization","Imports",TRUE +"PEcAn.logger","*","base/workflow","Imports",TRUE +"PEcAn.logger","*","models/basgra","Imports",TRUE +"PEcAn.logger","*","models/biocro","Imports",TRUE +"PEcAn.logger","*","models/cable","Imports",TRUE +"PEcAn.logger","*","models/clm45","Depends",TRUE +"PEcAn.logger","*","models/dalec","Imports",TRUE +"PEcAn.logger","*","models/dvmdostem","Imports",TRUE +"PEcAn.logger","*","models/ed","Imports",TRUE +"PEcAn.logger","*","models/fates","Imports",TRUE 
+"PEcAn.logger","*","models/gday","Imports",TRUE +"PEcAn.logger","*","models/jules","Imports",TRUE +"PEcAn.logger","*","models/ldndc","Imports",TRUE +"PEcAn.logger","*","models/linkages","Imports",TRUE +"PEcAn.logger","*","models/lpjguess","Imports",TRUE +"PEcAn.logger","*","models/maat","Imports",TRUE +"PEcAn.logger","*","models/maespa","Imports",TRUE +"PEcAn.logger","*","models/preles","Imports",TRUE +"PEcAn.logger","*","models/sibcasa","Imports",TRUE +"PEcAn.logger","*","models/sipnet","Imports",TRUE +"PEcAn.logger","*","models/stics","Imports",TRUE +"PEcAn.logger","*","models/template","Imports",TRUE +"PEcAn.logger","*","modules/assim.batch","Imports",TRUE +"PEcAn.logger","*","modules/assim.sequential","Imports",TRUE +"PEcAn.logger","*","modules/benchmark","Imports",TRUE +"PEcAn.logger","*","modules/data.atmosphere","Imports",TRUE +"PEcAn.logger","*","modules/data.land","Imports",TRUE +"PEcAn.logger","*","modules/data.remote","Imports",TRUE +"PEcAn.logger","*","modules/meta.analysis","Imports",TRUE +"PEcAn.logger","*","modules/priors","Imports",TRUE +"PEcAn.logger","*","modules/rtm","Imports",TRUE +"PEcAn.logger","*","modules/uncertainty","Imports",TRUE +"PEcAn.MA","*","base/all","Depends",TRUE +"PEcAn.MA","*","modules/assim.batch","Imports",TRUE +"PEcAn.MA","*","modules/priors","Imports",TRUE +"PEcAn.photosynthesis","*","base/all","Suggests",TRUE +"PEcAn.priors","*","base/all","Depends",TRUE +"PEcAn.priors","*","modules/uncertainty","Imports",TRUE +"PEcAn.remote","*","base/all","Depends",TRUE +"PEcAn.remote","*","base/db","Imports",TRUE +"PEcAn.remote","*","base/settings","Imports",TRUE +"PEcAn.remote","*","base/workflow","Imports",TRUE +"PEcAn.remote","*","models/biocro","Imports",TRUE +"PEcAn.remote","*","models/dalec","Imports",TRUE +"PEcAn.remote","*","models/ed","Imports",TRUE +"PEcAn.remote","*","models/fates","Imports",TRUE +"PEcAn.remote","*","models/gday","Imports",TRUE +"PEcAn.remote","*","models/jules","Imports",TRUE +"PEcAn.remote","*","models/ldndc","Imports",TRUE +"PEcAn.remote","*","models/linkages","Imports",TRUE +"PEcAn.remote","*","models/lpjguess","Imports",TRUE +"PEcAn.remote","*","models/maat","Imports",TRUE +"PEcAn.remote","*","models/maespa","Imports",TRUE +"PEcAn.remote","*","models/sipnet","Imports",TRUE +"PEcAn.remote","*","models/stics","Imports",TRUE +"PEcAn.remote","*","modules/assim.batch","Imports",TRUE +"PEcAn.remote","*","modules/assim.sequential","Imports",TRUE +"PEcAn.remote","*","modules/data.atmosphere","Imports",TRUE +"PEcAn.remote","*","modules/data.land","Imports",TRUE +"PEcAn.remote","*","modules/data.remote","Imports",TRUE +"PEcAn.settings","*","base/all","Depends",TRUE +"PEcAn.settings","*","base/workflow","Imports",TRUE +"PEcAn.settings","*","models/biocro","Imports",TRUE +"PEcAn.settings","*","models/ed","Imports",TRUE +"PEcAn.settings","*","models/maat","Imports",TRUE +"PEcAn.settings","*","models/stics","Imports",TRUE +"PEcAn.settings","*","modules/assim.batch","Imports",TRUE +"PEcAn.settings","*","modules/assim.sequential","Imports",TRUE +"PEcAn.settings","*","modules/benchmark","Imports",TRUE +"PEcAn.settings","*","modules/data.atmosphere","Suggests",TRUE +"PEcAn.settings","*","modules/data.land","Suggests",TRUE +"PEcAn.settings","*","modules/meta.analysis","Imports",TRUE +"PEcAn.settings","*","modules/uncertainty","Imports",TRUE +"PEcAn.SIPNET","*","base/all","Suggests",TRUE +"PEcAn.SIPNET","*","base/qaqc","Suggests",TRUE +"PEcAn.uncertainty","*","base/all","Depends",TRUE +"PEcAn.uncertainty","*","base/workflow","Imports",TRUE 
+"PEcAn.uncertainty","*","modules/assim.batch","Imports",TRUE +"PEcAn.uncertainty","*","modules/assim.sequential","Imports",TRUE +"PEcAn.utils","*","base/all","Depends",TRUE +"PEcAn.utils","*","base/db","Imports",TRUE +"PEcAn.utils","*","base/qaqc","Suggests",TRUE +"PEcAn.utils","*","base/settings","Imports",TRUE +"PEcAn.utils","*","base/workflow","Imports",TRUE +"PEcAn.utils","*","models/biocro","Imports",TRUE +"PEcAn.utils","*","models/clm45","Depends",TRUE +"PEcAn.utils","*","models/dalec","Imports",TRUE +"PEcAn.utils","*","models/ed","Imports",TRUE +"PEcAn.utils","*","models/fates","Imports",TRUE +"PEcAn.utils","*","models/gday","Depends",TRUE +"PEcAn.utils","*","models/jules","Imports",TRUE +"PEcAn.utils","*","models/linkages","Imports",TRUE +"PEcAn.utils","*","models/lpjguess","Imports",TRUE +"PEcAn.utils","*","models/maat","Imports",TRUE +"PEcAn.utils","*","models/maespa","Imports",TRUE +"PEcAn.utils","*","models/preles","Imports",TRUE +"PEcAn.utils","*","models/sipnet","Imports",TRUE +"PEcAn.utils","*","modules/assim.batch","Imports",TRUE +"PEcAn.utils","*","modules/assim.sequential","Suggests",TRUE +"PEcAn.utils","*","modules/benchmark","Imports",TRUE +"PEcAn.utils","*","modules/data.atmosphere","Imports",TRUE +"PEcAn.utils","*","modules/data.land","Imports",TRUE +"PEcAn.utils","*","modules/data.remote","Imports",TRUE +"PEcAn.utils","*","modules/meta.analysis","Imports",TRUE +"PEcAn.utils","*","modules/priors","Imports",TRUE +"PEcAn.utils","*","modules/rtm","Suggests",TRUE +"PEcAn.utils","*","modules/uncertainty","Imports",TRUE +"PEcAn.utils",">= 1.4.8","models/basgra","Imports",TRUE +"PEcAn.utils",">= 1.4.8","models/cable","Imports",TRUE +"PEcAn.utils",">= 1.4.8","models/dvmdostem","Imports",TRUE +"PEcAn.utils",">= 1.4.8","models/ldndc","Imports",TRUE +"PEcAn.utils",">= 1.4.8","models/stics","Imports",TRUE +"PEcAn.utils",">= 1.4.8","models/template","Imports",TRUE +"PEcAn.visualization","*","modules/assim.sequential","Suggests",TRUE +"PEcAn.visualization","*","modules/data.land","Imports",TRUE +"PEcAn.visualization","*","modules/priors","Suggests",TRUE +"PEcAn.workflow","*","base/all","Depends",TRUE +"PEcAn.workflow","*","modules/assim.batch","Imports",TRUE +"PEcAn.workflow","*","modules/assim.sequential","Imports",TRUE +"plotrix","*","base/qaqc","Imports",FALSE +"plotrix","*","modules/assim.sequential","Suggests",FALSE +"plyr",">= 1.8.4","base/visualization","Imports",FALSE +"plyr",">= 1.8.4","modules/assim.sequential","Suggests",FALSE +"plyr",">= 1.8.4","modules/uncertainty","Imports",FALSE +"png","*","base/visualization","Suggests",FALSE +"prodlim","*","modules/assim.batch","Imports",FALSE +"progress","*","modules/data.atmosphere","Suggests",FALSE +"purrr","*","base/db","Imports",FALSE +"purrr","*","base/settings","Imports",FALSE +"purrr","*","base/utils","Imports",FALSE +"purrr","*","models/ed","Imports",FALSE +"purrr","*","modules/assim.sequential","Imports",FALSE +"purrr","*","modules/data.land","Imports",FALSE +"purrr","*","modules/data.remote","Imports",FALSE +"purrr","*","modules/uncertainty","Imports",FALSE +"purrr",">= 0.2.3","base/workflow","Imports",FALSE +"purrr",">= 0.2.3","modules/data.atmosphere","Imports",FALSE +"pwr","*","modules/rtm","Suggests",FALSE +"R.utils","*","base/db","Imports",FALSE +"randomForest","*","modules/assim.sequential","Suggests",FALSE +"randtoolbox","*","base/utils","Suggests",FALSE +"randtoolbox","*","modules/uncertainty","Imports",FALSE +"raster","*","base/visualization","Suggests",FALSE 
+"raster","*","modules/assim.sequential","Suggests",FALSE +"raster","*","modules/data.atmosphere","Imports",FALSE +"raster","*","modules/data.land","Suggests",FALSE +"raster","*","modules/data.remote","Suggests",FALSE +"rcrossref","*","base/db","Suggests",FALSE +"readr","*","models/ldndc","Imports",FALSE +"readr","*","modules/assim.sequential","Suggests",FALSE +"REddyProc","*","modules/data.atmosphere","Imports",FALSE +"redland","*","modules/data.land","Suggests",FALSE +"reshape","*","modules/data.remote","Suggests",FALSE +"reshape2","*","base/visualization","Imports",FALSE +"reshape2","*","modules/benchmark","Imports",FALSE +"reshape2","*","modules/data.atmosphere","Imports",FALSE +"reshape2",">= 1.4.2","modules/assim.sequential","Suggests",FALSE +"reticulate","*","modules/data.atmosphere","Suggests",FALSE +"reticulate","*","modules/data.land","Suggests",FALSE +"reticulate","*","modules/data.remote","Imports",FALSE +"rjags","*","base/utils","Suggests",FALSE +"rjags","*","modules/assim.batch","Imports",FALSE +"rjags","*","modules/data.land","Imports",FALSE +"rjags","*","modules/meta.analysis","Imports",FALSE +"rjags","*","modules/photosynthesis","Depends",FALSE +"rjags","*","modules/priors","Suggests",FALSE +"rjson","*","models/dvmdostem","Imports",FALSE +"rlang","*","base/db","Imports",FALSE +"rlang","*","base/qaqc","Imports",FALSE +"rlang","*","base/utils","Imports",FALSE +"rlang","*","base/visualization","Imports",FALSE +"rlang","*","models/biocro","Imports",FALSE +"rlang","*","models/ed","Imports",FALSE +"rlang","*","models/ldndc","Imports",FALSE +"rlang","*","modules/assim.sequential","Imports",FALSE +"rlang","*","modules/benchmark","Imports",FALSE +"rlang","*","modules/data.land","Imports",FALSE +"rlang","*","modules/data.remote","Imports",FALSE +"rlang","*","modules/uncertainty","Imports",FALSE +"rlang",">= 0.2.0","modules/data.atmosphere","Imports",FALSE +"rlist","*","modules/assim.sequential","Suggests",FALSE +"rmarkdown","*","base/visualization","Suggests",FALSE +"rmarkdown","*","models/biocro","Suggests",FALSE +"rmarkdown","*","models/ed","Suggests",FALSE +"rmarkdown","*","models/maat","Suggests",FALSE +"rmarkdown","*","modules/data.atmosphere","Suggests",FALSE +"rmarkdown","*","modules/priors","Suggests",FALSE +"rmarkdown","*","modules/rtm","Suggests",FALSE +"rmarkdown",">= 2.19","base/db","Suggests",FALSE +"rmarkdown",">= 2.19","base/qaqc","Suggests",FALSE +"rmarkdown",">= 2.19","modules/allometry","Suggests",FALSE +"rmarkdown",">= 2.19","modules/assim.batch","Suggests",FALSE +"rmarkdown",">= 2.19","modules/meta.analysis","Suggests",FALSE +"rmarkdown",">= 2.19","modules/photosynthesis","Suggests",FALSE +"roxygen2","== 7.3.2","base/all","Roxygen",FALSE +"roxygen2","== 7.3.2","base/db","Roxygen",FALSE +"roxygen2","== 7.3.2","base/logger","Roxygen",FALSE +"roxygen2","== 7.3.2","base/qaqc","Roxygen",FALSE +"roxygen2","== 7.3.2","base/remote","Roxygen",FALSE +"roxygen2","== 7.3.2","base/settings","Roxygen",FALSE +"roxygen2","== 7.3.2","base/utils","Roxygen",FALSE +"roxygen2","== 7.3.2","base/visualization","Roxygen",FALSE +"roxygen2","== 7.3.2","base/workflow","Roxygen",FALSE +"roxygen2","== 7.3.2","models/basgra","Roxygen",FALSE +"roxygen2","== 7.3.2","models/biocro","Roxygen",FALSE +"roxygen2","== 7.3.2","models/cable","Roxygen",FALSE +"roxygen2","== 7.3.2","models/clm45","Roxygen",FALSE +"roxygen2","== 7.3.2","models/dalec","Roxygen",FALSE +"roxygen2","== 7.3.2","models/dvmdostem","Roxygen",FALSE +"roxygen2","== 7.3.2","models/ed","Roxygen",FALSE +"roxygen2","== 
7.3.2","models/fates","Roxygen",FALSE +"roxygen2","== 7.3.2","models/gday","Roxygen",FALSE +"roxygen2","== 7.3.2","models/jules","Roxygen",FALSE +"roxygen2","== 7.3.2","models/ldndc","Roxygen",FALSE +"roxygen2","== 7.3.2","models/linkages","Roxygen",FALSE +"roxygen2","== 7.3.2","models/lpjguess","Roxygen",FALSE +"roxygen2","== 7.3.2","models/maat","Roxygen",FALSE +"roxygen2","== 7.3.2","models/maespa","Roxygen",FALSE +"roxygen2","== 7.3.2","models/preles","Roxygen",FALSE +"roxygen2","== 7.3.2","models/sibcasa","Roxygen",FALSE +"roxygen2","== 7.3.2","models/sipnet","Roxygen",FALSE +"roxygen2","== 7.3.2","models/stics","Roxygen",FALSE +"roxygen2","== 7.3.2","models/template","Roxygen",FALSE +"roxygen2","== 7.3.2","modules/allometry","Roxygen",FALSE +"roxygen2","== 7.3.2","modules/assim.batch","Roxygen",FALSE +"roxygen2","== 7.3.2","modules/assim.sequential","Roxygen",FALSE +"roxygen2","== 7.3.2","modules/benchmark","Roxygen",FALSE +"roxygen2","== 7.3.2","modules/data.atmosphere","Roxygen",FALSE +"roxygen2","== 7.3.2","modules/data.land","Roxygen",FALSE +"roxygen2","== 7.3.2","modules/data.remote","Roxygen",FALSE +"roxygen2","== 7.3.2","modules/emulator","Roxygen",FALSE +"roxygen2","== 7.3.2","modules/meta.analysis","Roxygen",FALSE +"roxygen2","== 7.3.2","modules/photosynthesis","Roxygen",FALSE +"roxygen2","== 7.3.2","modules/priors","Roxygen",FALSE +"roxygen2","== 7.3.2","modules/rtm","Roxygen",FALSE +"roxygen2","== 7.3.2","modules/uncertainty","Roxygen",FALSE +"RPostgres","*","base/db","Suggests",FALSE +"RPostgreSQL","*","base/db","Suggests",FALSE +"RPostgreSQL","*","models/biocro","Suggests",FALSE +"Rpreles","*","models/preles","Suggests",FALSE +"RSQLite","*","base/db","Suggests",FALSE +"sessioninfo","*","base/all","Suggests",FALSE +"sf","*","modules/assim.sequential","Suggests",FALSE +"sf","*","modules/data.atmosphere","Imports",FALSE +"sf","*","modules/data.land","Imports",FALSE +"sf","*","modules/data.remote","Suggests",FALSE +"SimilarityMeasures","*","modules/benchmark","Imports",FALSE +"sirt","*","modules/data.land","Imports",FALSE +"sp","*","base/visualization","Suggests",FALSE +"sp","*","modules/assim.sequential","Suggests",FALSE +"sp","*","modules/data.atmosphere","Imports",FALSE +"sp","*","modules/data.land","Imports",FALSE +"sp","*","modules/data.remote","Imports",FALSE +"stats","*","base/qaqc","Imports",FALSE +"stats","*","models/sipnet","Imports",FALSE +"stats","*","modules/allometry","Imports",FALSE +"stats","*","modules/assim.batch","Imports",FALSE +"stats","*","modules/assim.sequential","Suggests",FALSE +"stats","*","modules/photosynthesis","Imports",FALSE +"SticsRFiles","*","models/stics","Suggests",FALSE +"stringi","*","base/logger","Imports",FALSE +"stringi","*","base/utils","Imports",FALSE +"stringr","*","models/fates","Imports",FALSE +"stringr","*","modules/assim.sequential","Imports",FALSE +"stringr","*","modules/benchmark","Imports",FALSE +"stringr","*","modules/data.land","Imports",FALSE +"stringr",">= 1.1.0","base/visualization","Imports",FALSE +"stringr",">= 1.1.0","models/ed","Imports",FALSE +"stringr",">= 1.1.0","modules/data.atmosphere","Imports",FALSE +"suntools","*","modules/data.atmosphere","Imports",FALSE +"swfscMisc","*","modules/data.land","Imports",FALSE +"terra","*","modules/assim.sequential","Suggests",FALSE +"terra","*","modules/data.atmosphere","Imports",FALSE +"terra","*","modules/data.land","Imports",FALSE +"terra","*","modules/data.remote","Imports",FALSE +"testthat","*","base/all","Suggests",FALSE +"testthat","*","base/logger","Suggests",FALSE 
+"testthat","*","base/remote","Suggests",FALSE +"testthat","*","base/workflow","Suggests",FALSE +"testthat","*","modules/assim.sequential","Suggests",FALSE +"testthat","*","modules/priors","Suggests",FALSE +"testthat",">= 1.0.2","base/visualization","Suggests",FALSE +"testthat",">= 1.0.2","models/basgra","Suggests",FALSE +"testthat",">= 1.0.2","models/cable","Suggests",FALSE +"testthat",">= 1.0.2","models/clm45","Suggests",FALSE +"testthat",">= 1.0.2","models/dalec","Suggests",FALSE +"testthat",">= 1.0.2","models/dvmdostem","Suggests",FALSE +"testthat",">= 1.0.2","models/ed","Suggests",FALSE +"testthat",">= 1.0.2","models/fates","Suggests",FALSE +"testthat",">= 1.0.2","models/gday","Suggests",FALSE +"testthat",">= 1.0.2","models/jules","Suggests",FALSE +"testthat",">= 1.0.2","models/ldndc","Suggests",FALSE +"testthat",">= 1.0.2","models/linkages","Suggests",FALSE +"testthat",">= 1.0.2","models/lpjguess","Suggests",FALSE +"testthat",">= 1.0.2","models/maat","Suggests",FALSE +"testthat",">= 1.0.2","models/maespa","Suggests",FALSE +"testthat",">= 1.0.2","models/preles","Suggests",FALSE +"testthat",">= 1.0.2","models/sipnet","Suggests",FALSE +"testthat",">= 1.0.2","models/stics","Suggests",FALSE +"testthat",">= 1.0.2","models/template","Suggests",FALSE +"testthat",">= 1.0.2","modules/allometry","Suggests",FALSE +"testthat",">= 1.0.2","modules/assim.batch","Suggests",FALSE +"testthat",">= 1.0.2","modules/data.land","Suggests",FALSE +"testthat",">= 1.0.2","modules/data.remote","Suggests",FALSE +"testthat",">= 1.0.2","modules/meta.analysis","Suggests",FALSE +"testthat",">= 1.0.2","modules/rtm","Suggests",FALSE +"testthat",">= 1.0.2","modules/uncertainty","Suggests",FALSE +"testthat",">= 2.0.0","base/db","Suggests",FALSE +"testthat",">= 2.0.0","base/settings","Suggests",FALSE +"testthat",">= 2.0.0","base/utils","Suggests",FALSE +"testthat",">= 2.0.0","models/biocro","Suggests",FALSE +"testthat",">= 2.0.0","modules/benchmark","Suggests",FALSE +"testthat",">= 3.0.0","models/sibcasa","Suggests",FALSE +"testthat",">= 3.0.4","base/qaqc","Suggests",FALSE +"testthat",">= 3.1.7","modules/data.atmosphere","Suggests",FALSE +"tibble","*","base/db","Imports",FALSE +"tibble","*","models/ed","Imports",FALSE +"tibble","*","models/fates","Imports",FALSE +"tibble","*","models/lpjguess","Imports",FALSE +"tibble","*","modules/data.atmosphere","Imports",FALSE +"tibble","*","modules/data.remote","Suggests",FALSE +"tictoc","*","modules/assim.sequential","Suggests",FALSE +"tidyr","*","base/db","Imports",FALSE +"tidyr","*","models/ed","Imports",FALSE +"tidyr","*","modules/assim.sequential","Suggests",FALSE +"tidyr","*","modules/data.atmosphere","Imports",FALSE +"tidyr","*","modules/data.land","Imports",FALSE +"tidyselect","*","modules/benchmark","Imports",FALSE +"tidyselect","*","modules/data.atmosphere","Imports",FALSE +"tidyselect","*","modules/data.land","Imports",FALSE +"tidyverse","*","base/db","Suggests",FALSE +"tools","*","base/remote","Suggests",FALSE +"tools","*","modules/allometry","Imports",FALSE +"traits","*","modules/data.land","Imports",FALSE +"TruncatedNormal",">= 2.2","modules/assim.batch","Imports",FALSE +"truncnorm","*","modules/data.atmosphere","Imports",FALSE +"units","*","base/db","Imports",FALSE +"units","*","base/utils","Imports",FALSE +"units","*","modules/benchmark","Imports",FALSE +"units","*","modules/data.atmosphere","Imports",FALSE +"urltools","*","base/remote","Imports",FALSE +"utils","*","base/all","Imports",FALSE +"utils","*","base/logger","Imports",FALSE 
+"utils","*","models/ed","Imports",FALSE +"utils","*","models/linkages","Imports",FALSE +"utils","*","modules/allometry","Imports",FALSE +"utils","*","modules/assim.batch","Imports",FALSE +"utils","*","modules/assim.sequential","Suggests",FALSE +"utils","*","modules/benchmark","Imports",FALSE +"utils","*","modules/data.remote","Suggests",FALSE +"utils","*","modules/photosynthesis","Imports",FALSE +"vdiffr",">= 1.0.2","base/qaqc","Suggests",FALSE +"withr","*","base/db","Suggests",FALSE +"withr","*","base/logger","Suggests",FALSE +"withr","*","base/qaqc","Suggests",FALSE +"withr","*","base/remote","Suggests",FALSE +"withr","*","base/settings","Suggests",FALSE +"withr","*","base/utils","Suggests",FALSE +"withr","*","base/visualization","Suggests",FALSE +"withr","*","base/workflow","Suggests",FALSE +"withr","*","models/basgra","Suggests",FALSE +"withr","*","models/ed","Suggests",FALSE +"withr","*","models/sibcasa","Suggests",FALSE +"withr","*","modules/allometry","Suggests",FALSE +"withr","*","modules/data.atmosphere","Suggests",FALSE +"XML","*","base/workflow","Imports",FALSE +"XML","*","models/biocro","Imports",FALSE +"XML","*","models/maat","Imports",FALSE +"XML","*","models/stics","Imports",FALSE +"XML","*","modules/assim.batch","Imports",FALSE +"XML","*","modules/assim.sequential","Suggests",FALSE +"XML","*","modules/data.remote","Imports",FALSE +"XML","*","modules/rtm","Suggests",FALSE +"XML",">= 3.98-1.3","base/settings","Imports",FALSE +"XML",">= 3.98-1.4","models/ed","Imports",FALSE +"XML",">= 3.98-1.4","modules/benchmark","Imports",FALSE +"XML",">= 3.98-1.4","modules/data.atmosphere","Imports",FALSE +"XML",">= 3.98-1.4","modules/data.land","Imports",FALSE +"xtable","*","base/utils","Suggests",FALSE +"xts","*","modules/data.atmosphere","Imports",FALSE +"zoo","*","modules/benchmark","Imports",FALSE +"zoo","*","modules/data.atmosphere","Imports",FALSE diff --git a/docker/docker-compose.example.yml b/docker/docker-compose.example.yml index fd649f7bc93..5ef88fc2402 100644 --- a/docker/docker-compose.example.yml +++ b/docker/docker-compose.example.yml @@ -1,7 +1,3 @@ -# you need a version entry, and this should be the same version as the -# docker-compose.yml file. -version: "3" - # if you change any of the services you will need the services header. 
services: diff --git a/docker/docs/Dockerfile b/docker/docs/Dockerfile index 692b09c3287..315769eed57 100644 --- a/docker/docs/Dockerfile +++ b/docker/docs/Dockerfile @@ -5,11 +5,15 @@ ARG IMAGE_VERSION="latest" # compile bookdown to html # ---------------------------------------------------------------------- FROM pecan/base:${IMAGE_VERSION} AS pecandocs -MAINTAINER Rob Kooper RUN apt-get update \ && apt-get install -y --no-install-recommends pandoc \ - && install2.r -e -s -n -1 bookdown \ + && Rscript \ + -e 'repos <- c(getOption("repos"),' \ + -e ' sub(r"(\d{4}-\d{2}-\d{2})", "latest", getOption("repos")))' \ + -e 'remotes::install_version("rmarkdown", ">= 2.19", dependencies = TRUE, upgrade = FALSE, repos = repos)' \ + -e 'remotes::install_version("knitr", ">= 1.42", dependencies = TRUE, upgrade = FALSE, repos = repos)' \ + -e 'remotes::install_version("bookdown", ">= 0.31", dependencies = TRUE, upgrade = FALSE, repos = repos)' \ && rm -rf /var/lib/apt/lists/* WORKDIR /src/book_source/ @@ -23,7 +27,11 @@ RUN make build # copy html pages to container # ---------------------------------------------------------------------- FROM httpd -MAINTAINER Rob Kooper + +# need curl for health checks +RUN apt-get update \ + && apt-get install -y curl \ + && rm -rf /var/lib/apt/lists/* COPY docker/docs/index.html /usr/local/apache2/htdocs/ COPY --from=pecandocs /src/book_source/_book/ /usr/local/apache2/htdocs/docs/pecan/ diff --git a/docker/executor/Dockerfile b/docker/executor/Dockerfile index 34722783909..19cecced3cd 100644 --- a/docker/executor/Dockerfile +++ b/docker/executor/Dockerfile @@ -5,7 +5,6 @@ ARG IMAGE_VERSION="latest" # PECAN FOR MODEL BASE IMAGE # ---------------------------------------------------------------------- FROM pecan/base:${IMAGE_VERSION} -MAINTAINER Rob Kooper # ---------------------------------------------------------------------- # SETUP FOR PYTHON CODE @@ -24,4 +23,4 @@ ENV RABBITMQ_URI="amqp://guest:guest@rabbitmq/%2F" \ # actual application that will be executed COPY executor.py /work/ -CMD python3 /work/executor.py +CMD ["python3", "/work/executor.py"] diff --git a/docker/models/Dockerfile b/docker/models/Dockerfile index 1870fba9a03..f76f51382e1 100644 --- a/docker/models/Dockerfile +++ b/docker/models/Dockerfile @@ -5,7 +5,6 @@ ARG IMAGE_VERSION="latest" # PECAN FOR MODEL BASE IMAGE # ---------------------------------------------------------------------- FROM pecan/base:${IMAGE_VERSION} -MAINTAINER Rob Kooper # ---------------------------------------------------------------------- # SETUP FOR PYTHON CODE @@ -23,4 +22,4 @@ ENV RABBITMQ_URI="amqp://guest:guest@rabbitmq/%2F" \ # actual application that will be executed COPY model.py /work/ -CMD python3 /work/model.py +CMD ["python3", "/work/model.py"] diff --git a/docker/monitor/Dockerfile b/docker/monitor/Dockerfile index ea776f08796..ec479cc2458 100644 --- a/docker/monitor/Dockerfile +++ b/docker/monitor/Dockerfile @@ -1,4 +1,4 @@ -FROM python:3.5 +FROM python:3.12 ENV RABBITMQ_URI="amqp://guest:guest@rabbitmq/%2F" \ RABBITMQ_MGMT_PORT="15672" \ @@ -17,4 +17,4 @@ COPY requirements.txt /src/ RUN pip3 install -r /src/requirements.txt COPY . 
/src/
-CMD python3 monitor.py
+CMD ["python3", "./monitor.py"]
diff --git a/docker/monitor/requirements.txt b/docker/monitor/requirements.txt
index cf6031f1fae..d5537687d4d 100644
--- a/docker/monitor/requirements.txt
+++ b/docker/monitor/requirements.txt
@@ -1,4 +1,4 @@
-pika==1.0.0
-requests==2.21.0
-psycopg2-binary==2.7.7
-python-dateutil==2.8.0
+pika==1.3.2
+requests==2.32.0
+psycopg2-binary==2.9.9
+python-dateutil==2.8.2
diff --git a/docker/rstudio-nginx/Dockerfile b/docker/rstudio-nginx/Dockerfile
index 600c1da6484..af33c2c3d71 100644
--- a/docker/rstudio-nginx/Dockerfile
+++ b/docker/rstudio-nginx/Dockerfile
@@ -1,4 +1,3 @@
 FROM nginx:alpine
-MAINTAINER Rob Kooper

 COPY nginx.conf /etc/nginx/conf.d/default.conf
diff --git a/docker/web/Dockerfile b/docker/web/Dockerfile
index 69610652342..9382561e6e7 100644
--- a/docker/web/Dockerfile
+++ b/docker/web/Dockerfile
@@ -1,5 +1,4 @@
-FROM php:7-apache
-MAINTAINER Rob Kooper
+FROM php:8-apache

 # ----------------------------------------------------------------------
 # install rabbitmq and postgresql extensions
diff --git a/docker/web/config.docker.php b/docker/web/config.docker.php
index 4c601438c97..a54fb0e7fff 100644
--- a/docker/web/config.docker.php
+++ b/docker/web/config.docker.php
@@ -16,10 +16,6 @@
 $db_fia_password="";
 $db_fia_database="";

-# browdog information
-$browndog_url="";
-$browndog_username="";
-$browndog_password="";
 # R binary
 $Rbinary="/usr/bin/R";
diff --git a/documentation/README.md b/documentation/README.md
index f802adf0a73..b457094e574 100644
--- a/documentation/README.md
+++ b/documentation/README.md
@@ -1,3 +1,5 @@
-This folder contains published articles describing the development and application of PEcAn as well as tutorials.
+# Readme.md

-The full documentation can be found in the book_source directory, and is published at https://pecanproject.github.io/pecan-documentation/ with each new release.
+This folder contains published articles describing the development and application of PEcAn as well as tutorials.
+
+The full documentation can be found in the book_source directory, and is published at <https://pecanproject.github.io/pecan-documentation/> with each new release.
diff --git a/documentation/index_vm.html b/documentation/index_vm.html
index d6d3166157a..562f92d88f6 100644
--- a/documentation/index_vm.html
+++ b/documentation/index_vm.html
@@ -30,7 +30,7 @@

Documentation

The documentation for PEcAn is rendered using bookdown. A PDF version can be found locally. The most up to date version can be found at
-<a href="https://pecanproject.github.io/pecan-documentation/master/">our website</a>.
+<a href="https://pecanproject.github.io/pecan-documentation/latest/">our website</a>.
diff --git a/documentation/tutorials/01_Demo_Basic_Run/Demo01.Rmd b/documentation/tutorials/01_Demo_Basic_Run/Demo01.Rmd
index 97ec998211f..57fbc7db351 100644
--- a/documentation/tutorials/01_Demo_Basic_Run/Demo01.Rmd
+++ b/documentation/tutorials/01_Demo_Basic_Run/Demo01.Rmd
@@ -155,7 +155,7 @@ The entire PEcAn team welcomes any questions you may have!
 **If the Finished Stage has a Status of “DONE”, congratulations!** If you got this far, you have managed to run an ecosystem model without ever touching a line of code! Now it’s time to look at the results **click Finished**.

-FYI, [adding a new model](https://pecanproject.github.io/pecan-documentation/master/adding-an-ecosystem-model.html) to PEcAn does not require modification of the model’s code, just the implementation of a wrapper function.
+FYI, [adding a new model](https://pecanproject.github.io/pecan-documentation/latest/adding-an-ecosystem-model.html) to PEcAn does not require modification of the model’s code, just the implementation of a wrapper function.

 #### Output and Visualization
diff --git a/documentation/tutorials/02_Demo_Uncertainty_Analysis/Demo02.Rmd b/documentation/tutorials/02_Demo_Uncertainty_Analysis/Demo02.Rmd
index b560ff39258..f9164088852 100644
--- a/documentation/tutorials/02_Demo_Uncertainty_Analysis/Demo02.Rmd
+++ b/documentation/tutorials/02_Demo_Uncertainty_Analysis/Demo02.Rmd
@@ -97,12 +97,12 @@ The [next set of tutorials](#demo-table) will focus on the process of data assim

 #### Assimilation 'by hand'

-[Explore](https://github.com/PecanProject/pecan/blob/master/documentation/tutorials/sensitivity/PEcAn_sensitivity_tutorial_v1.0.Rmd) how model error changes as a function of parameter value (i.e. data assimilation ‘by hand’)
+[Explore](https://github.com/PecanProject/pecan/blob/main/documentation/tutorials/sensitivity/PEcAn_sensitivity_tutorial_v1.0.Rmd) how model error changes as a function of parameter value (i.e. data assimilation ‘by hand’)

 #### MCMC Concepts

-[Explore](https://github.com/PecanProject/pecan/blob/master/documentation/tutorials/MCMC/MCMC_Concepts.Rmd) Bayesian MCMC concepts using the photosynthesis module
+[Explore](https://github.com/PecanProject/pecan/blob/main/documentation/tutorials/MCMC/MCMC_Concepts.Rmd) Bayesian MCMC concepts using the photosynthesis module

 #### More info about tools, analyses, and specific tasks…
diff --git a/documentation/tutorials/AnalyzeOutput/bety b/documentation/tutorials/AnalyzeOutput/bety
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/documentation/tutorials/MCMC/MCMC_Concepts.Rmd b/documentation/tutorials/MCMC/MCMC_Concepts.Rmd
index 162054f6ce0..46abe98f455 100644
--- a/documentation/tutorials/MCMC/MCMC_Concepts.Rmd
+++ b/documentation/tutorials/MCMC/MCMC_Concepts.Rmd
@@ -78,7 +78,7 @@ plot(dat$Ci[aci],dat$Photo[aci],main="ACi")
 plot(dat$PARi[!aci],dat$Photo[!aci],main="AQ")
 ```

-In PEcAn we've written a wrapper function, $fitA$, around the statistical model discussed above, which has a number of other bells and whistles discussed in the [PEcAn Photosynthesis Vignette](https://github.com/PecanProject/pecan/blob/master/modules/photosynthesis/vignettes/ResponseCurves.Rmd). For today we'll just use the most basic version, which takes as arguments the data and the number of MCMC iterations we want to run.
+In PEcAn we've written a wrapper function, $fitA$, around the statistical model discussed above, which has a number of other bells and whistles discussed in the [PEcAn Photosynthesis Vignette](https://github.com/PecanProject/pecan/blob/main/modules/photosynthesis/vignettes/ResponseCurves.Rmd). For today we'll just use the most basic version, which takes as arguments the data and the number of MCMC iterations we want to run. ```{r echo=TRUE, eval=FALSE} fit <- fitA(dat,model=list(n.iter=10000)) @@ -179,7 +179,7 @@ Note: on the last figure you will get warnings about "No ACi" and "No AQ" which #### Additional information -There is a more detailed R Vignette on the use of the PEcAn photosynthesis module available in the [PEcAn Repository](https://github.com/PecanProject/pecan/blob/master/modules/photosynthesis/vignettes/ResponseCurves.Rmd). +There is a more detailed R Vignette on the use of the PEcAn photosynthesis module available in the [PEcAn Repository](https://github.com/PecanProject/pecan/blob/main/modules/photosynthesis/vignettes/ResponseCurves.Rmd). #### Citations diff --git a/documentation/tutorials/deploy.sh b/documentation/tutorials/deploy.sh index a4497227e55..434fadcaf5f 100755 --- a/documentation/tutorials/deploy.sh +++ b/documentation/tutorials/deploy.sh @@ -40,4 +40,4 @@ done < buildfiles cd book_hosted git add --all * git commit -m "Update tutorials `date`" || true -git push -q origin master +git push -q origin latest diff --git a/get-v8-linux.sh b/get-v8-linux.sh deleted file mode 100644 index 5ed00376716..00000000000 --- a/get-v8-linux.sh +++ /dev/null @@ -1,53 +0,0 @@ -download_libs() { -# On debian CI we want to test against system libv8 -if [ "$USER" = "salsaci" ]; then - return; -fi - -# Gets the R target architecture in case of qemu-containers, e.g -# https://hub.docker.com/r/i386/debian -# Which reports uname -m: x86_64 (only i386 seems to have this issue) -RARCH=$(${R_HOME}/bin/Rscript -e 'cat(R.Version()$arch)') -case $RARCH in - x86_64 | arm64 | aarch64) - echo "Target architecture: $RARCH" - ;; - *) - echo "Unexpected architecture: $RARCH" - return; - ;; -esac - -# RHDT compilers are using an older libc++ -# https://github.com/jeroen/V8/issues/137 -if test -f "/etc/redhat-release" && grep -Fq "release 7" "/etc/redhat-release"; then -IS_CENTOS7=1 -fi - -IS_MUSL=$(ldd --version 2>&1 | grep musl) -if [ $? -eq 0 ] && [ "$IS_MUSL" ]; then - URL="https://github.com/jeroen/V8/releases/download/v3.6.0/v8-9.6.180.12-alpine.tar.gz" -elif [ "$RARCH" = "arm64" ] || [ "$RARCH" = "aarch64" ]; then - URL="https://github.com/jeroen/V8/releases/download/v3.6.0/v8-9.6.180.12-arm64.tar.gz" -else - IS_GCC4=$($CXX --version | grep -P '^g++.*[^\d.]4(\.\d){2}') - if [ $? -eq 0 ] && [ "$IS_GCC4" ]; then - URL="https://github.com/jeroen/V8/releases/download/v3.6.0/v8-6.8.275.32-gcc-4.8.tar.gz" - elif [ "$IS_CENTOS7" ]; then - URL="https://github.com/jeroen/V8/releases/download/v3.6.0/v8-6.8.275.32-gcc-4.8.tar.gz" - else - URL="https://github.com/jeroen/V8/releases/download/v3.6.0/v8-9.6.180.12-amd64.tar.gz" - fi -fi -if [ ! 
-f ".deps/lib/libv8_monolith.a" ]; then - ${R_HOME}/bin/R -q -e "curl::curl_download('$URL','libv8.tar.gz',quiet=FALSE)" - tar xzf libv8.tar.gz - rm -f libv8.tar.gz - mv v8 .deps -fi -PKG_CFLAGS="-I${PWD}/.deps/include" -PKG_LIBS="-L${PWD}/.deps/lib -lv8_monolith" -} - -download_libs - diff --git a/models/basgra/DESCRIPTION b/models/basgra/DESCRIPTION index 818d5ce141e..f34e3f69db1 100644 --- a/models/basgra/DESCRIPTION +++ b/models/basgra/DESCRIPTION @@ -1,12 +1,12 @@ Package: PEcAn.BASGRA Type: Package Title: PEcAn Package for Integration of the BASGRA Model -Version: 1.7.2 -Date: 2021-10-04 +Version: 1.8.0.9000 Authors@R: c(person("Istem", "Fer", role = c("aut", "cre"), email = "istem.fer@fmi.fi"), person("University of Illinois, NCSA", role = c("cph"))) Description: This module provides functions to link the BASGRA model to PEcAn. +Depends: R (>= 4.0.0) Imports: PEcAn.logger, PEcAn.data.atmosphere, @@ -14,7 +14,8 @@ Imports: lubridate, ncdf4, Suggests: - testthat (>= 1.0.2) + testthat (>= 1.0.2), + withr OS_type: unix SystemRequirements: GNU Fortran License: BSD_3_clause + file LICENSE @@ -22,4 +23,4 @@ Copyright: Authors LazyLoad: yes LazyData: FALSE Encoding: UTF-8 -RoxygenNote: 7.2.3 +RoxygenNote: 7.3.2 diff --git a/models/basgra/LICENSE b/models/basgra/LICENSE index 5a9e44128f1..09ef35a60b4 100644 --- a/models/basgra/LICENSE +++ b/models/basgra/LICENSE @@ -1,34 +1,3 @@ -## This is the master copy of the PEcAn License - -University of Illinois/NCSA Open Source License - -Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - -PEcAn project -www.pecanproject.org - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal with the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -- Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. -- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. -- Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - +YEAR: 2024 +COPYRIGHT HOLDER: PEcAn Project +ORGANIZATION: PEcAn Project, authors affiliations diff --git a/models/basgra/NEWS.md b/models/basgra/NEWS.md new file mode 100644 index 00000000000..ffb27ef758a --- /dev/null +++ b/models/basgra/NEWS.md @@ -0,0 +1,7 @@ +# PEcAn.BASGRA 1.8.0.9000 + +## License change +* PEcAn.BASGRA is now distributed under the BSD three-clause license instead of the NCSA Open Source license. 
+ +## Added +* Added a `NEWS.md` file to track changes to the package. Prior to this point changes are tracked in the main CHANGELOG for the PEcAn repository. diff --git a/models/basgra/R/read_restart.BASGRA.R b/models/basgra/R/read_restart.BASGRA.R index 3c4f6af0538..4020b57bde1 100644 --- a/models/basgra/R/read_restart.BASGRA.R +++ b/models/basgra/R/read_restart.BASGRA.R @@ -41,6 +41,11 @@ read_restart.BASGRA <- function(outdir, runid, stop.time, settings, var.names, p names(forecast[[length(forecast)]]) <- c("slow_soil_pool_carbon_content") } + if ("soil_organic_nitrogen_content" %in% var.names) { + forecast[[length(forecast) + 1]] <- ens$soil_nitrogen_content[last] # kg N m-2 + names(forecast[[length(forecast)]]) <- c("soil_nitrogen_content") + } + if ("TotSoilCarb" %in% var.names) { forecast[[length(forecast) + 1]] <- ens$TotSoilCarb[last] # kg C m-2 names(forecast[[length(forecast)]]) <- c("TotSoilCarb") @@ -51,6 +56,11 @@ read_restart.BASGRA <- function(outdir, runid, stop.time, settings, var.names, p names(forecast[[length(forecast)]]) <- c("NEE") } + if ("NPP" %in% var.names) { + forecast[[length(forecast) + 1]] <- ens$NPP[last] # kg C m-2 s-1 + names(forecast[[length(forecast)]]) <- c("NPP") + } + if ("Qle" %in% var.names) { forecast[[length(forecast) + 1]] <- ens$Qle[last] # W m-2 names(forecast[[length(forecast)]]) <- c("Qle") @@ -127,6 +137,16 @@ read_restart.BASGRA <- function(outdir, runid, stop.time, settings, var.names, p names(forecast[[length(forecast)]]) <- c("phenological_stage") } + if ("SoilMoistFrac" %in% var.names) { + forecast[[length(forecast) + 1]] <- ens$SoilMoistFrac[last] + names(forecast[[length(forecast)]]) <- c("SoilMoistFrac") + } + + if ("harvest_carbon_flux" %in% var.names) { + forecast[[length(forecast) + 1]] <- ens$harvest_carbon_flux[last] # kg C m-2 s-1 + names(forecast[[length(forecast)]]) <- c("harvest_carbon_flux") + } + PEcAn.logger::logger.info(runid) X_tmp <- list(X = unlist(forecast), params = params) diff --git a/models/basgra/R/run_BASGRA.R b/models/basgra/R/run_BASGRA.R index 13ee6614d21..00d178e2c99 100644 --- a/models/basgra/R/run_BASGRA.R +++ b/models/basgra/R/run_BASGRA.R @@ -20,14 +20,15 @@ ##' @param sitelat latitude of the site ##' @param sitelon longitude of the site ##' @param co2_file path to daily atmospheric CO2 concentration file, optional, defaults to 350 ppm when missing +##' @param write_raw_output write raw output in csv or not ##' ##' @export ##' @useDynLib PEcAn.BASGRA, .registration = TRUE -##' @author Istem Fer +##' @author Istem Fer, Julius Vira ##-------------------------------------------------------------------------------------------------# run_BASGRA <- function(run_met, run_params, site_harvest, site_fertilize, start_date, end_date, outdir, - sitelat, sitelon, co2_file = NULL){ + sitelat, sitelon, co2_file = NULL, write_raw_output = FALSE){ start_date <- as.POSIXlt(start_date, tz = "UTC") if(lubridate::hour(start_date) == 23){ @@ -38,7 +39,7 @@ run_BASGRA <- function(run_met, run_params, site_harvest, site_fertilize, start_ start_year <- lubridate::year(start_date) end_year <- lubridate::year(end_date) - if(co2_file == "NULL") co2_file <- NULL + if(length(co2_file) > 0 && co2_file == "NULL") co2_file <- NULL ################################################################################ ### FUNCTIONS FOR READING WEATHER DATA mini_met2model_BASGRA <- function(file_path, @@ -65,7 +66,6 @@ run_BASGRA <- function(run_met, run_params, site_harvest, site_fertilize, start_ } - NDAYS <- length(simdays) NWEATHER <- 
as.integer(9) matrix_weather <- matrix( 0., nrow = NDAYS, ncol = NWEATHER ) @@ -76,15 +76,14 @@ run_BASGRA <- function(run_met, run_params, site_harvest, site_fertilize, start_ matrix_weather[ ,1] <- rep(year, NDAYS) # year matrix_weather[ ,2] <- simdays - - if(grepl(year, basename(file_path))){ + + if(endsWith(file_path, '.nc')){ # we probably have a (near-term) forecast met old.file <- file_path }else{ old.file <- file.path(dirname(file_path), paste(basename(file_path), year, "nc", sep = ".")) } - if (file.exists(old.file)) { ## open netcdf @@ -102,28 +101,31 @@ run_BASGRA <- function(run_met, run_params, site_harvest, site_fertilize, start_ ind <- rep(simdays, each = tstep) if(unlist(strsplit(nc$dim$time$units, " "))[1] %in% c("days", "day")){ - #this should always be the case, but just in case + #this should always be the case, but just in case origin_dt <- (as.POSIXct(unlist(strsplit(nc$dim$time$units, " "))[3], "%Y-%m-%d", tz="UTC") + 60*60*24) - dt - ydays <- lubridate::yday(origin_dt + sec) - - }else{ + # below -dt means that midnights belong to the day that ends. This is consistent + # with data files which are exclusive of the 1 Jan midnight + dt till 1 Jan next year. + # ydays <- lubridate::yday(origin_dt + sec - dt) + ydays <- lubridate::yday(origin_dt + sec) + all_days <- origin_dt + sec + } else { PEcAn.logger::logger.error("Check units of time in the weather data.") } - rad <- ncdf4::ncvar_get(nc, "surface_downwelling_shortwave_flux_in_air") gr <- rad * 0.0864 # W m-2 to MJ m-2 d-1 # temporary hack, not sure if it will generalize with other data products # function might need a splitting arg - gr <- gr[ydays %in% simdays] - + gr <- gr[(ydays %in% simdays) & (lubridate::year(all_days) == year)] + if (length(ind) > length(gr)) { + PEcAn.logger::logger.severe('The input does not cover the requested simulation period') + } matrix_weather[ ,3] <- round(tapply(gr, ind, mean, na.rm = TRUE), digits = 2) # irradiation (MJ m-2 d-1) Tair <- ncdf4::ncvar_get(nc, "air_temperature") ## in Kelvin - Tair <- Tair[ydays %in% simdays] + Tair <- Tair[(ydays %in% simdays) & (lubridate::year(all_days) == year)] Tair_C <- PEcAn.utils::ud_convert(Tair, "K", "degC") - #in BASGRA tmin and tmax is only used to calculate the average daily temperature, see environment.f90 t_dmean <- round(tapply(Tair_C, ind, mean, na.rm = TRUE), digits = 2) # maybe round these numbers t_dmin <- round(tapply(Tair_C, ind, min, na.rm = TRUE), digits = 2) @@ -132,7 +134,7 @@ run_BASGRA <- function(run_met, run_params, site_harvest, site_fertilize, start_ matrix_weather[ ,5] <- t_dmax # that's what they had in read_weather_Bioforsk RH <- ncdf4::ncvar_get(nc, "relative_humidity") # % - RH <- RH[ydays %in% simdays] + RH <- RH[(ydays %in% simdays) & (lubridate::year(all_days) == year)] RH <- round(tapply(RH, ind, mean, na.rm = TRUE), digits = 2) # This is vapor pressure according to BASGRA.f90#L86 and environment.f90#L49 @@ -140,19 +142,19 @@ run_BASGRA <- function(run_met, run_params, site_harvest, site_fertilize, start_ # TODO: check these Rain <- ncdf4::ncvar_get(nc, "precipitation_flux") # kg m-2 s-1 - Rain <- Rain[ydays %in% simdays] + Rain <- Rain[(ydays %in% simdays) & (lubridate::year(all_days) == year)] raini <- tapply(Rain*86400, ind, mean, na.rm = TRUE) matrix_weather[ ,7] <- round(raini, digits = 2) # precipitation (mm d-1) U <- try(ncdf4::ncvar_get(nc, "eastward_wind")) V <- try(ncdf4::ncvar_get(nc, "northward_wind")) if(is.numeric(U) & is.numeric(V)){ - U <- U[ydays %in% simdays] - V <- V[ydays %in% simdays]
+ U <- U[(ydays %in% simdays) & (lubridate::year(all_days) == year)] + V <- V[(ydays %in% simdays) & (lubridate::year(all_days) == year)] ws <- sqrt(U ^ 2 + V ^ 2) }else{ ws <- try(ncdf4::ncvar_get(nc, "wind_speed")) - ws <- ws[ydays %in% simdays] + ws <- ws[(ydays %in% simdays) & (lubridate::year(all_days) == year)] if (is.numeric(ws)) { PEcAn.logger::logger.info("eastward_wind and northward_wind absent; using wind_speed") }else{ @@ -160,13 +162,12 @@ run_BASGRA <- function(run_met, run_params, site_harvest, site_fertilize, start_ } } - matrix_weather[ ,8] <- round(tapply(ws, ind, mean, na.rm = TRUE), digits = 2) # mean wind speed (m s-1) # CO2 co2 <- try(ncdf4::ncvar_get(nc, "mole_fraction_of_carbon_dioxide_in_air")) if(is.numeric(co2)){ - co2 <- co2[ydays %in% simdays] / 1e-06 # ppm + co2 <- co2[(ydays %in% simdays) & (lubridate::year(all_days) == year)] / 1e-06 # ppm co2 <- round(tapply(co2, ind, mean, na.rm = TRUE), digits = 2) }else{ co2 <- NA @@ -177,7 +178,7 @@ run_BASGRA <- function(run_met, run_params, site_harvest, site_fertilize, start_ ncdf4::nc_close(nc) } else { - PEcAn.logger::logger.info("File for year", year, "not found. Skipping to next year") + PEcAn.logger::logger.info("File for year", year, old.file, "not found. Skipping to next year") next } @@ -231,7 +232,11 @@ run_BASGRA <- function(run_met, run_params, site_harvest, site_fertilize, start_ "NSOURCE" , "NSINK" , # 96:97 "NRT" , "NCRT" , # 98:99 "rNLITT" , "rNSOMF" , # 100:101 - "DAYL" , "EVAP" , "TRAN" # 102:104 + "DAYL" , "EVAP" , "TRAN" , "FLITTC_LEAF", # 102:105 + "FLITTC_ROOT", "NEE" , "FHARVC" , "FRUNOFFC", # 106:109 + "CSOM_A" , "CSOM_W" , "CSOM_E" , "CSOM_N", # 110:113 + "CSOM_H" , "NSOM" , "TEMPR30" , "PRECIP30", # 114:117 + "FSOILAMDC" # 118 ) outputUnits <- c( @@ -260,11 +265,16 @@ run_BASGRA <- function(run_met, run_params, site_harvest, site_fertilize, start_ "(g N m-2 d-1)", "(g N m-2 d-1)", # 96:97 "(g N m-2)" , "(g N g-1 C)" , # 98:99 "(g N m-2)" , "(g N g-1 C)" , # 100:101 - "(d d-1)" , "(mm d-1)" , "(mm d-1)" # 102:104 + "(d d-1)" , "(mm d-1)" , "(mm d-1)" , "(g C m-2 d-1)", # 102:105 + "(g C m-2 d-1)", "(g C m-2 d-1)", "(g C m-2 d-1)" , "(g C m-2 d-1)", # 106:109 + "(g C m-2)" , "(g C m-2)" , "(g C m-2)" , "(g C m-2)", # 110:113 + "(g C m-2)" , "(g N m-2)" , "(degC)" , "(mm)", # 114:117 + "(g C m-2 d-1)" # 118 ) NOUT <- as.integer( length(outputNames) ) + if (length(outputUnits) != NOUT) { PEcAn.logger::logger.severe('#outputNames != #outputUnits') } ############################# SITE CONDITIONS ######################## # this part corresponds to initialise_BASGRA_***.R functions @@ -294,12 +304,21 @@ run_BASGRA <- function(run_met, run_params, site_harvest, site_fertilize, start_ # write.table(matrix_weather[1:NDAYS,], file=paste0(outdir,"/clim",start_date,".",substr(end_date, 1,10),".csv"), # sep=",", row.names = FALSE, col.names=FALSE) - calendar_fert <- matrix( 0, nrow=300, ncol=3 ) + calendar_fert <- matrix( 0, nrow=300, ncol=6) - # read in harvest days + # read in fertilization f_days <- as.matrix(utils::read.table(site_fertilize, header = TRUE, sep = ",")) - calendar_fert[1:nrow(f_days),] <- f_days - + if (ncol(f_days) == 3) { + # old-style fertilization file + calendar_fert[1:nrow(f_days),1:3] <- f_days + } else { + if (ncol(f_days) != 6) { + PEcAn.logger::logger.severe(sprintf('Wrong number of columns (%i) in fertilization file', ncol(f_days))) + } + columns <- c('year', 'doy', 'Nmin', 'Norg', 'C_soluble', 'C_compost') + calendar_fert[1:nrow(f_days),] <- f_days[,columns] + } + 
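+ # For reference, a minimal sketch of the six-column fertilization file read above
+ # (values are hypothetical; only the header names are fixed by the code, and on
+ # the Fortran side columns 3:6 map to [mineral-N, organic-N, soluble-C, compost-C]).
+ # An old-style file keeps just the first three columns (year, doy, Nmin):
+ #   year,doy,Nmin,Norg,C_soluble,C_compost
+ #   2019,135,5.0,1.0,0.0,0.0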
calendar_Ndep <- matrix( 0, nrow=300, ncol=3 ) #calendar_Ndep[1,] <- c(1900, 1,0) #calendar_Ndep[2,] <- c(2100, 366, 0) @@ -310,37 +329,70 @@ run_BASGRA <- function(run_met, run_params, site_harvest, site_fertilize, start_ calendar_Ndep[2,] <- c( 1980, 366, 0*1000/(10000*365) ) # 0 kg N ha-1 y-1 N-deposition in 1980 calendar_Ndep[3,] <- c( 2100, 366, 0*1000/(10000*365) ) # 0 kg N ha-1 y-1 N-deposition in 2100 - days_harvest <- matrix(as.integer(-1), nrow= 300, ncol = 3) + harvest_params <- matrix(0.0, nrow=300, ncol=2) + df_harvest <- utils::read.csv(site_harvest) + n_events <- nrow(df_harvest) + allowed_harv_colnames <- c('year', 'doy', 'CLAIV', 'cut_only') + if (!all(colnames(df_harvest) %in% allowed_harv_colnames)) { + PEcAn.logger::logger.severe(c('Bad column names in harvest file: ', colnames(df_harvest))) + } + days_harvest <- matrix(as.integer(-1), nrow= 300, ncol = 2) + if (n_events > 0) { + days_harvest[1:n_events,1:2] <- as.matrix(df_harvest[,c('year', 'doy')]) + } + if ('CLAIV' %in% colnames(df_harvest)) { + harvest_params[1:n_events,1] <- df_harvest$CLAIV + } else { # default + harvest_params[1:n_events,1] <- run_params[names(run_params) == "CLAIV"] + } + if ('cut_only' %in% colnames(df_harvest)) { + harvest_params[1:n_events,2] <- df_harvest$cut_only + } else { + harvest_params[1:n_events,2] <- 0.0 + } # read in harvest days - h_days <- as.matrix(utils::read.table(site_harvest, header = TRUE, sep = ",")) - days_harvest[1:nrow(h_days),1:2] <- h_days[,1:2] + #h_days <- as.matrix(utils::read.table(site_harvest, header = TRUE, sep = ",")) + #days_harvest[1:nrow(h_days),1:2] <- h_days[,1:2] # This is a management specific parameter # CLAIV is used to determine LAI remaining after harvest # I modified BASGRA code to use different values for different harvests # I'll pass it via harvest file as the 3rd column # but just in case users forgot to add the third column to the harvest file: - if(ncol(h_days) == 3){ - days_harvest[1:nrow(h_days),3] <- h_days[,3]*10 # as.integer - }else{ - PEcAn.logger::logger.info("CLAIV not provided via harvest file. Using defaults.") - days_harvest[1:nrow(h_days),3] <- run_params[names(run_params) == "CLAIV"] - } - days_harvest <- as.integer(days_harvest) + #if(ncol(h_days) == 3){ + # days_harvest[1:nrow(h_days),3] <- h_days[,3]*10 # as.integer + #}else{ + # PEcAn.logger::logger.info("CLAIV not provided via harvest file. 
Using defaults.") + # days_harvest[1:nrow(h_days),3] <- run_params[names(run_params) == "CLAIV"] + #} + #days_harvest <- as.integer(days_harvest) # run model + NPARAMS = as.integer(160) # from set_params.f90 + if (length(run_params) != NPARAMS) { + PEcAn.logger::logger.severe(sprintf('%i parameters required, %i given', NPARAMS, length(run_params))) + } + if (NOUT < 118) { # from BASGRA.f90 + PEcAn.logger::logger.severe("at least 118 parameters required,", NOUT, "given") + } + + output <- .Fortran('BASGRA', run_params, matrix_weather, calendar_fert, calendar_Ndep, - days_harvest, + as.integer(days_harvest), + harvest_params, + NPARAMS, NDAYS, NOUT, - matrix(0, NDAYS, NOUT))[[8]] - + matrix(0, NDAYS, NOUT))[[10]] # for now a hack to write other states out - save(output, file = file.path(outdir, "output_basgra.Rdata")) + # save(output, file = file.path(outdir, "output_basgra.Rdata")) + if (write_raw_output) { + utils::write.csv(stats::setNames(as.data.frame(output), outputNames), file.path(outdir, "output_basgra.csv")) + } last_vals <- output[nrow(output),] names(last_vals) <- outputNames save(last_vals, file = file.path(outdir, "last_vals_basgra.Rdata")) @@ -352,6 +404,10 @@ run_BASGRA <- function(run_met, run_params, site_harvest, site_fertilize, start_ sec_in_day <- 86400 years <- seq(start_year, end_year) + + # Having the Yasso soil affects how some C pools are aggregated + have_yasso <- run_params[137] > 0 + for (y in years) { thisyear <- output[ , outputNames == "year"] == y @@ -382,15 +438,24 @@ run_BASGRA <- function(run_met, run_params, site_harvest, site_fertilize, start_ clvd <- output[thisyear, which(outputNames == "CLVD")] # (g C m-2) outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(clvd, "g m-2", "kg m-2") - - csomf <- output[thisyear, which(outputNames == "CSOMF")] # (g C m-2) - outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(csomf, "g m-2", "kg m-2") - - csoms <- output[thisyear, which(outputNames == "CSOMS")] # (g C m-2) - outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(csoms, "g m-2", "kg m-2") - + + if (have_yasso) { + csomf <- rowSums(output[thisyear, outputNames %in% c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N'), drop=FALSE]) # (g C m-2) + outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(csomf, "g m-2", "kg m-2") + csoms <- output[thisyear, outputNames == "CSOM_H"] # (g C m-2) + outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(csoms, "g m-2", "kg m-2") + nsom <- output[thisyear, outputNames == "NSOM"] # (g N m-2) + outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(nsom, "g m-2", "kg m-2") + } else { + csomf <- output[thisyear, which(outputNames == "CSOMF")] # (g C m-2) + outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(csomf, "g m-2", "kg m-2") + csoms <- output[thisyear, which(outputNames == "CSOMS")] # (g C m-2) + outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(csoms, "g m-2", "kg m-2") + nsomf <- output[thisyear, outputNames == "NSOMF"] # (g N m-2) + nsoms <- output[thisyear, outputNames == "NSOMS"] # (g N m-2) + outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(nsomf+nsoms, "g m-2", "kg m-2") + } outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(csomf + csoms, "g m-2", "kg m-2") - outlist[[length(outlist)+1]] <- output[thisyear, which(outputNames == "TILG1")] outlist[[length(outlist)+1]] <- output[thisyear, which(outputNames == "TILG2")] outlist[[length(outlist)+1]] <- output[thisyear, which(outputNames == "TILV")] @@ -418,6 +483,9 @@ run_BASGRA <- function(run_met, run_params, site_harvest, 
site_fertilize, start_ # again this is not technically GPP outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(phot, "g m-2", "kg m-2") / sec_in_day + # NPP + outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(nee - rsoil, "g m-2", "kg m-2") / sec_in_day + # Qle W/m2 outlist[[length(outlist)+1]] <- ( output[thisyear, which(outputNames == "EVAP")] + output[thisyear, which(outputNames == "TRAN")] * PEcAn.data.atmosphere::get.lv()) / sec_in_day @@ -426,6 +494,29 @@ run_BASGRA <- function(run_met, run_params, site_harvest, site_fertilize, start_ # during the growing season its depth will mainly be equal to the rooting depth, but during winter its depth will be ROOTD-Fdepth soilm <- output[thisyear, which(outputNames == "WAL")] # mm outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(soilm, "mm", "m") * 1000 # (kg m-3) density of water in soil + + # WCL = WAL*0.001 / (ROOTD-Fdepth) Water concentration in non-frozen soil + # need to think about ice! but the sensors maybe don't measure that + ROOTD <- output[thisyear, which(outputNames == "ROOTD")] + Fdepth <- output[thisyear, which(outputNames == "Fdepth")] + outlist[[length(outlist)+1]] <- soilm * 0.001 / (ROOTD - Fdepth) + + # Additional C fluxes + outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(output[thisyear, outputNames == "FLITTC_LEAF"], + "g m-2", "kg m-2") / sec_in_day + outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(output[thisyear, outputNames == "FLITTC_ROOT"], + "g m-2", "kg m-2") / sec_in_day + outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(output[thisyear, outputNames == "FHARVC"], + "g m-2", "kg m-2") / sec_in_day + outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(output[thisyear, outputNames == "NEE"], + "g m-2", "kg m-2") / sec_in_day + outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(output[thisyear, outputNames == "FRUNOFFC"], + "g m-2", "kg m-2") / sec_in_day + outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(output[thisyear, outputNames == "FSOILAMDC"], + "g m-2", "kg m-2") / sec_in_day + outlist[[length(outlist)+1]] <- output[thisyear, outputNames == "TEMPR30"] + outlist[[length(outlist)+1]] <- output[thisyear, outputNames == "PRECIP30"] + # ******************** Declare netCDF dimensions and variables ********************# t <- ncdf4::ncdim_def(name = "time", @@ -456,6 +547,8 @@ run_BASGRA <- function(run_met, run_params, site_harvest, site_fertilize, start_ longname = "Dead Leaf Carbon Content") nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("fast_soil_pool_carbon_content", dims) nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("slow_soil_pool_carbon_content", dims) + nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("soil_organic_nitrogen_content", units = "kg N m-2", dim = dims, missval = -999, + longname = "Soil Organic Nitrogen Content by Layer ") nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("TotSoilCarb", dims) nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("nonelongating_generative_tiller", units = "m-2", dim = dims, missval = -999, longname = "Non-elongating generative tiller density") @@ -471,9 +564,29 @@ run_BASGRA <- function(run_met, run_params, site_harvest, site_fertilize, start_ nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("AutoResp", dims) nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("NEE", dims) nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("GPP", dims) + nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("NPP", dims) nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("Qle", dims) nc_var[[length(nc_var)+1]] <-
ncdf4::ncvar_def("SoilMoist", units = "kg m-2", dim = dims, missval = -999, longname = "Average Layer Soil Moisture") + nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("SoilMoistFrac", units = "m3 m-3", dim = dims, missval = -999, + longname = "Average Layer Fraction of Saturation") + nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("leaf_litter_carbon_flux", units = "kg C m-2 s-1", dim = dims, + missval = -999, longname='Flux of carbon from leaf litter to soil pools') + nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("fine_root_litter_carbon_flux", units = "kg C m-2 s-1", dim = dims, + missval = -999, longname='Flux of carbon from fine root litter to soil pools') + nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("harvest_carbon_flux", units = "kg C m-2 s-1", dim = dims, + missval = -999, longname='Flux of carbon removed by harvest') + nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("NEE_alt", units = "kg C m-2 s-1", dim = dims, + missval = -999, longname='Alternative NEE') + nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("FRUNOFFC", units = "kg C m-2 s-1", dim = dims, + missval = -999, longname='C in runoff') + nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("FSOILAMDC", units = "kg C m-2 s-1", dim = dims, + missval = -999, longname='Flux of carbon input in soil amendments') + nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("TEMPR30", units = "degC", dim = dims, + missval = -999, longname='Smoothed air temperature') + nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("PRECIP30", units = "mm/day", dim = dims, + missval = -999, longname='Smoothed daily precipitation') + # ******************** Declare netCDF variables ********************# diff --git a/models/basgra/R/version.R b/models/basgra/R/version.R new file mode 100644 index 00000000000..0e58d885272 --- /dev/null +++ b/models/basgra/R/version.R @@ -0,0 +1,3 @@ +# Set at package install time, used by pecan.all::pecan_version() +# to identify development versions of packages +.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown") diff --git a/models/basgra/R/write.config.BASGRA.R b/models/basgra/R/write.config.BASGRA.R index 0f9217d0f47..da3d55474ac 100644 --- a/models/basgra/R/write.config.BASGRA.R +++ b/models/basgra/R/write.config.BASGRA.R @@ -1,12 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - ##-------------------------------------------------------------------------------------------------# ##' Writes a BASGRA config file. ##' @@ -31,10 +22,13 @@ write.config.BASGRA <- function(defaults, trait.values, settings, run.id, IC = N outdir <- file.path(settings$host$outdir, run.id) # load default(!) 
BASGRA params - run_params <- PEcAn.utils::load_local(system.file("BASGRA_params.Rdata", package = "PEcAn.BASGRA"))$default_params - + if (!is.null(settings$run$inputs$defaults$path)) { + df_run_params <- utils::read.csv(settings$run$inputs$defaults$path) + } else { + df_run_params <- utils::read.csv(system.file("BASGRA_params.csv", package = "PEcAn.BASGRA")) + } + run_params <- stats::setNames(df_run_params[,2], df_run_params[,1]) run_params[which(names(run_params) == "LAT")] <- as.numeric(settings$run$site$lat) - #### write run-specific PFT parameters here #### Get parameters being handled by PEcAn for (pft in seq_along(trait.values)) { @@ -161,6 +155,30 @@ write.config.BASGRA <- function(defaults, trait.values, settings, run.id, IC = N run_params[which(names(run_params) == "TVERN")] <- pft.traits[which(pft.names == "vernalization_threshold")] } + if ("hardening_parameter" %in% pft.names) { + run_params[which(names(run_params) == "Hparam")] <- pft.traits[which(pft.names == "hardening_parameter")] + } + + if ("max_res_abg" %in% pft.names) { + run_params[which(names(run_params) == "COCRESMX")] <- pft.traits[which(pft.names == "max_res_abg")] + } + + if ("max_size_etil" %in% pft.names) { + run_params[which(names(run_params) == "CSTAVM")] <- pft.traits[which(pft.names == "max_size_etil")] + } + + if ("maxSLAmin" %in% pft.names) { + run_params[which(names(run_params) == "FSLAMIN")] <- pft.traits[which(pft.names == "maxSLAmin")] + } + + if ("rehardening_disappears" %in% pft.names) { + run_params[which(names(run_params) == "reHardRedDay")] <- pft.traits[which(pft.names == "rehardening_disappears")] + } + + if ("LUE_increase" %in% pft.names) { + run_params[which(names(run_params) == "KLUETILG")] <- pft.traits[which(pft.names == "LUE_increase")] + } + ##### Soil parameters @@ -248,79 +266,274 @@ write.config.BASGRA <- function(defaults, trait.values, settings, run.id, IC = N if ("etil_resv_harv" %in% pft.names) { run_params[which(names(run_params) == "HAGERE")] <- pft.traits[which(pft.names == "etil_resv_harv")] } + + # Yasso decomposition parameters + param_pairs <- list( + c('som_a_decomp_rate', 'yasso_alpha_a'), c('som_w_decomp_rate', 'yasso_alpha_w'), + c('som_e_decomp_rate', 'yasso_alpha_e'), c('som_n_decomp_rate', 'yasso_alpha_n'), + c('yasso_rate_pc', 'yasso_rate_pc'), c('yasso_tresp_pc', 'yasso_tresp_pc') + ) + for (param_pair in param_pairs) { + # Yasso-specific params + if (param_pair[1] %in% pft.names) { + run_params[which(names(run_params) == param_pair[2])] <- pft.traits[which(pft.names == param_pair[1])] + } + } } #### End parameter update - - - + #### Update initial conditions if (!is.null(IC)) { ic.names <- names(IC) - if ("LAI" %in% ic.names) { - run_params[names(run_params) == "LOG10LAII"] <- IC$LAI + # let's combine these here + last_vals <- c() + last_states_file <- file.path(outdir, "last_vals_basgra.Rdata") + + if(!file.exists(last_states_file) & is.null(IC$test_vals)){ + PEcAn.logger::logger.severe("Last step output values are missing for restart.") + }else if(!is.null(IC$test_vals)){ + # for package testing + last_vals <- IC$test_vals + }else{ + load(last_states_file) } - if ("fast_soil_pool_carbon_content" %in% ic.names) { - run_params[names(run_params) == "CSOMF0"] <- PEcAn.utils::ud_convert(IC$fast_soil_pool_carbon_content, "kg", "g") + if ("LAI" %in% ic.names) { + run_params[names(run_params) == "LOG10LAII"] <- IC$LAI + }else{ + run_params[names(run_params) == "LOG10LAII"] <- last_vals[names(last_vals) == "LAI"] } - if ("slow_soil_pool_carbon_content" %in% ic.names) { -
run_params[names(run_params) == "CSOMS0"] <- PEcAn.utils::ud_convert(IC$slow_soil_pool_carbon_content, "kg", "g") + # For Yasso restart + if(run_params[names(run_params) == "use_yasso"]){ + + last_somf <- sum(last_vals[names(last_vals) == "CSOM_A"], + last_vals[names(last_vals) == "CSOM_W"], + last_vals[names(last_vals) == "CSOM_E"], + last_vals[names(last_vals) == "CSOM_N"]) + + last_soms <- last_vals[names(last_vals) == "CSOM_H"] + + if ("fast_soil_pool_carbon_content" %in% ic.names & "slow_soil_pool_carbon_content" %in% ic.names) { + + new_somf <- PEcAn.utils::ud_convert(IC$fast_soil_pool_carbon_content, "kg", "g") + new_soms <- PEcAn.utils::ud_convert(IC$slow_soil_pool_carbon_content, "kg", "g") + + ratio_somf <- new_somf / last_somf + + # update via ratio + run_params[names(run_params) == "CSOM_A"] <- ratio_somf * last_vals[names(last_vals) == "CSOM_A"] + run_params[names(run_params) == "CSOM_W"] <- ratio_somf * last_vals[names(last_vals) == "CSOM_W"] + run_params[names(run_params) == "CSOM_E"] <- ratio_somf * last_vals[names(last_vals) == "CSOM_E"] + run_params[names(run_params) == "CSOM_N"] <- ratio_somf * last_vals[names(last_vals) == "CSOM_N"] + run_params[names(run_params) == "CSOM_H"] <- new_soms + + run_params[names(run_params) == "NSOM"] <- ((new_somf+new_soms)/(last_somf+last_soms)) * last_vals[names(last_vals) == "NSOM"] + + }else{ + + run_params[names(run_params) == "CSOM_A"] <- last_vals[names(last_vals) == "CSOM_A"] + run_params[names(run_params) == "CSOM_W"] <- last_vals[names(last_vals) == "CSOM_W"] + run_params[names(run_params) == "CSOM_E"] <- last_vals[names(last_vals) == "CSOM_E"] + run_params[names(run_params) == "CSOM_N"] <- last_vals[names(last_vals) == "CSOM_N"] + run_params[names(run_params) == "CSOM_H"] <- last_soms + + run_params[names(run_params) == "NSOM"] <- last_vals[names(last_vals) == "NSOM"] + + } + + # #else-if ("TotSoilCarb" %in% ic.names)? 
+ # new_totc <- PEcAn.utils::ud_convert(IC$TotSoilCarb, "kg", "g") + # + # ratio_soc <- new_totc / (last_somf + last_soms) + # + # # update via ratio + # run_params[names(run_params) == "CSOM_A"] <- ratio_soc * last_vals[names(last_vals) == "CSOM_A"] + # run_params[names(run_params) == "CSOM_W"] <- ratio_soc * last_vals[names(last_vals) == "CSOM_W"] + # run_params[names(run_params) == "CSOM_E"] <- ratio_soc * last_vals[names(last_vals) == "CSOM_E"] + # run_params[names(run_params) == "CSOM_N"] <- ratio_soc * last_vals[names(last_vals) == "CSOM_N"] + # run_params[names(run_params) == "CSOM_H"] <- ratio_soc * last_vals[names(last_vals) == "CSOM_H"] + + + }else{ # no Yasso + if ("fast_soil_pool_carbon_content" %in% ic.names) { + run_params[names(run_params) == "CSOMF0"] <- PEcAn.utils::ud_convert(IC$fast_soil_pool_carbon_content, "kg", "g") + }else{ + run_params[names(run_params) == "CSOMF0"] <- last_vals[names(last_vals) == "CSOMF"] + } + run_params[names(run_params) == "NSOMF0"] <- run_params[names(run_params) == "CSOMF0"] / run_params[names(run_params) == "CNSOMF0"] + + if ("slow_soil_pool_carbon_content" %in% ic.names) { + run_params[names(run_params) == "CSOMS0"] <- PEcAn.utils::ud_convert(IC$slow_soil_pool_carbon_content, "kg", "g") + }else{ + run_params[names(run_params) == "CSOMS0"] <- last_vals[names(last_vals) == "CSOMS"] + } + run_params[names(run_params) == "NSOMS0"] <- run_params[names(run_params) == "CSOMS0"] / run_params[names(run_params) == "CNSOMS0"] + } if ("CropYield" %in% ic.names) { - run_params[names(run_params) == "YIELDI"] <- PEcAn.utils::ud_convert(IC$CropYield, "kg", "g") + run_params[names(run_params) == "YIELDI"] <- PEcAn.utils::ud_convert(IC$CropYield, "kg", "g") + }else{ + run_params[names(run_params) == "YIELDI"] <- last_vals[names(last_vals) == "YIELD_POT"] } if ("litter_carbon_content" %in% ic.names) { - run_params[names(run_params) == "CLITT0"] <- PEcAn.utils::ud_convert(IC$litter_carbon_content, "kg", "g") + run_params[names(run_params) == "CLITT0"] <- PEcAn.utils::ud_convert(IC$litter_carbon_content, "kg", "g") + }else{ + run_params[names(run_params) == "CLITT0"] <- last_vals[names(last_vals) == "CLITT"] } + #run_params[names(run_params) == "NLITT0"] <- run_params[names(run_params) == "CLITT0"] / run_params[names(run_params) == "CNLITT0"] + run_params[which(names(run_params) == "NLITT0")] <- last_vals[names(last_vals) == "NLITT"] - # not as important as others but you can throw this into the SDA too, then comment out last value overwriting below - # if ("stubble_carbon_content" %in% ic.names) { - # run_params[names(run_params) == "CSTUBI"] <- PEcAn.utils::ud_convert(IC$stubble_carbon_content, "kg", "g") - # } + if ("stubble_carbon_content" %in% ic.names) { + run_params[names(run_params) == "CSTUBI"] <- PEcAn.utils::ud_convert(IC$stubble_carbon_content, "kg", "g") + }else{ + run_params[names(run_params) == "CSTUBI"] <- last_vals[names(last_vals) == "CSTUB"] + } if ("stem_carbon_content" %in% ic.names) { - run_params[names(run_params) == "CSTI"] <- PEcAn.utils::ud_convert(IC$stem_carbon_content, "kg", "g") + run_params[names(run_params) == "CSTI"] <- PEcAn.utils::ud_convert(IC$stem_carbon_content, "kg", "g") + }else{ + run_params[names(run_params) == "CSTI"] <- last_vals[names(last_vals) == "CST"] } + # NRT = NCR * CRTI + #run_params[names(run_params) == "NCR"] <- last_vals[names(last_vals) == "NRT"] / last_vals[names(last_vals) == "CRT"] if ("root_carbon_content" %in% ic.names) { run_params[names(run_params) == "LOG10CRTI"] <- 
PEcAn.utils::ud_convert(IC$root_carbon_content, "kg", "g") + }else{ + run_params[names(run_params) == "LOG10CRTI"] <- last_vals[names(last_vals) == "CRT"] } + run_params[which(names(run_params) == "NRTI")] <- run_params[names(run_params) == "LOG10CRTI"]*run_params[names(run_params) == "NCR"] + # if(run_params[which(names(run_params) == "NRTI")] <= 0) run_params[which(names(run_params) == "NRTI")] <- 0.0001 + + # # NCSHI = NCSHMAX * (1-EXP(-K*LAII)) / (K*LAII) + # # NSH = NCSHI * (CLVI+CSTI) + lai_tmp <- run_params[names(run_params) == "LOG10LAII"] + ncshi <- run_params[names(run_params) == "NCSHMAX"] * + (1-exp(-run_params[names(run_params) == "K"]*lai_tmp)) / (run_params[names(run_params) == "K"]*lai_tmp) + run_params[which(names(run_params) == "NSHI")] <- ncshi * + ((run_params[names(run_params) == "LOG10CLVI"]) + run_params[names(run_params) == "CSTI"]) + if ("reserve_carbon_content" %in% ic.names) { - run_params[names(run_params) == "LOG10CRESI"] <- PEcAn.utils::ud_convert(IC$reserve_carbon_content, "kg", "g") + run_params[names(run_params) == "LOG10CRESI"] <- PEcAn.utils::ud_convert(IC$reserve_carbon_content, "kg", "g") + }else{ + run_params[names(run_params) == "LOG10CRESI"] <- last_vals[names(last_vals) == "CRES"] } if ("leaf_carbon_content" %in% ic.names) { - run_params[names(run_params) == "LOG10CLVI"] <- PEcAn.utils::ud_convert(IC$leaf_carbon_content, "kg", "g") + run_params[names(run_params) == "LOG10CLVI"] <- PEcAn.utils::ud_convert(IC$leaf_carbon_content, "kg", "g") + }else{ + run_params[names(run_params) == "LOG10CLVI"] <- last_vals[names(last_vals) == "CLV"] } if ("dead_leaf_carbon_content" %in% ic.names) { - run_params[names(run_params) == "CLVDI"] <- PEcAn.utils::ud_convert(IC$dead_leaf_carbon_content, "kg", "g") + run_params[names(run_params) == "CLVDI"] <- PEcAn.utils::ud_convert(IC$dead_leaf_carbon_content, "kg", "g") + }else{ + run_params[names(run_params) == "CLVDI"] <- last_vals[names(last_vals) == "CLVD"] + } + + if ("tiller_density" %in% ic.names) { + run_params[names(run_params) == "TILTOTI"] <- IC$tiller_density # all the tillers are updated from this with respect to model preserved ratios + }else{ + run_params[names(run_params) == "TILTOTI"] <- last_vals[names(last_vals) == "TILTOT"] } + run_params[names(run_params) == "FRTILGI"] <- last_vals[names(last_vals) == "FRTILG"] + + if(run_params[names(run_params) == "FRTILGI"] == 0) run_params[names(run_params) == "FRTILGI"] <- 0.01 + + #TILV = TILTOTI * (1. 
- FRTILGI) + if ("nonelongating_vegetative_tiller" %in% ic.names) { + run_params[names(run_params) == "TILVI"] <- IC$nonelongating_vegetative_tiller + # preserve ratio + #run_params[names(run_params) == "FRTILGI"] <- 1 - (run_params[names(run_params) == "TILVI"]/run_params[names(run_params) == "TILTOTI"]) + }else{ + run_params[names(run_params) == "TILVI"] <- run_params[names(run_params) == "TILTOTI"] * (1-run_params[names(run_params) == "FRTILGI"]) + } + + gtil <- run_params[names(run_params) == "TILTOTI"] - run_params[names(run_params) == "TILVI"] + + #TILG1 = TILTOTI * FRTILGI * FRTILGG1I if ("nonelongating_generative_tiller" %in% ic.names) { run_params[names(run_params) == "TILG1I"] <- IC$nonelongating_generative_tiller + # can also update FRTILGG1I but I don't throw these into the state matrix anymore and TILG1I is initialized from its own variable, not derived from fractions + }else{ + run_params[names(run_params) == "TILG1I"] <- gtil*(last_vals[names(last_vals) == "TILG1"] / + (last_vals[names(last_vals) == "TILG1"]+last_vals[names(last_vals) == "TILG2"])) + if(is.nan(run_params[names(run_params) == "TILG1I"])) run_params[names(run_params) == "TILG1I"] <- 1 + #if(is.infinite(run_params[names(run_params) == "TILG1I"])) run_params[names(run_params) == "TILG1I"] <- 1 } - + + #TILG2 = TILTOTI * FRTILGI * (1-FRTILGG1I) if ("elongating_generative_tiller" %in% ic.names) { run_params[names(run_params) == "TILG2I"] <- IC$elongating_generative_tiller + }else{ + run_params[names(run_params) == "TILG2I"] <- gtil*(last_vals[names(last_vals) == "TILG2"] / + (last_vals[names(last_vals) == "TILG1"]+last_vals[names(last_vals) == "TILG2"])) + if(is.nan(run_params[names(run_params) == "TILG2I"])) run_params[names(run_params) == "TILG2I"] <- 1 + # if(is.infinite(run_params[names(run_params) == "TILG2I"])) run_params[names(run_params) == "TILG2I"] <- 1 + } + + if ("phenological_stage" %in% ic.names) { + run_params[names(run_params) == "PHENI"] <- IC$phenological_stage + }else{ + run_params[names(run_params) == "PHENI"] <- last_vals[names(last_vals) == "PHEN"] } - if ("nonelongating_vegetative_tiller" %in% ic.names) { - run_params[names(run_params) == "TILVI"] <- IC$nonelongating_vegetative_tiller + if ("lethal_temperature50" %in% ic.names) { + run_params[names(run_params) == "LT50I"] <- IC$lethal_temperature50 + }else{ + run_params[names(run_params) == "LT50I"] <- last_vals[names(last_vals) == "LT50"] } - if ("tiller_density" %in% ic.names) { - run_params[names(run_params) == "TILTOTI"] <- IC$tiller_density + + if ("rooting_depth" %in% ic.names) { + run_params[names(run_params) == "ROOTDM"] <- IC$rooting_depth + }else{ + run_params[names(run_params) == "ROOTDM"] <- last_vals[names(last_vals) == "ROOTD"] # this doesn't change } - - if ("phenological_stage" %in% ic.names) { - run_params[names(run_params) == "PHENI"] <- IC$phenological_stage + + # these change too + run_params[names(run_params) == "TEMPR30"] <- last_vals[names(last_vals) == "TEMPR30"] + run_params[names(run_params) == "PRECIP30"] <- last_vals[names(last_vals) == "PRECIP30"] + + run_params[names(run_params) == "DAYLI"] <- last_vals[names(last_vals) == "DAYL"] + + run_params[names(run_params) == "NMIN0"] <- last_vals[names(last_vals) == "NMIN"] + + run_params[names(run_params) == "O2I"] <- last_vals[names(last_vals) == "O2"] + + + # water stuff, to be in SDA + + run_params[names(run_params) == "DRYSTORI"] <- last_vals[names(last_vals) == "DRYSTOR"] + run_params[names(run_params) == "FdepthI"] <- last_vals[names(last_vals) == "Fdepth"] + 
run_params[names(run_params) == "SDEPTHI"] <- last_vals[names(last_vals) == "Sdepth"] + run_params[names(run_params) == "TANAERI"] <- last_vals[names(last_vals) == "TANAER"] + run_params[names(run_params) == "WAPLI"] <- last_vals[names(last_vals) == "WAPL"] + run_params[names(run_params) == "WAPSI"] <- last_vals[names(last_vals) == "WAPS"] + run_params[names(run_params) == "WASI"] <- last_vals[names(last_vals) == "WAS"] + run_params[names(run_params) == "WETSTORI"] <- last_vals[names(last_vals) == "WETSTOR"] + + # WAL = 1000. * ROOTDM * WCI + if ("SoilMoistFrac" %in% ic.names) { + run_params[names(run_params) == "WCI"] <- IC$SoilMoistFrac + run_params[names(run_params) == "WALI"] <- 1000. * (run_params[names(run_params) == "ROOTDM"] - run_params[names(run_params) == "FdepthI"]) * run_params[names(run_params) == "WCI"] + }else{ + run_params[names(run_params) == "WALI"] <- last_vals[names(last_vals) == "WAL"] + run_params[names(run_params) == "WCI"] <- run_params[names(run_params) == "WALI"] / (1000 * (run_params[names(run_params) == "ROOTDM"]- run_params[names(run_params) == "FdepthI"])) + } + + yasso_pools <- c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N', 'CSOM_H', 'NSOM', 'TEMPR30', 'PRECIP30') + for (p in yasso_pools) { + if (p %in% ic.names) { + run_params[names(run_params) == p] <- IC[[p]] + } } @@ -372,11 +585,17 @@ write.config.BASGRA <- function(defaults, trait.values, settings, run.id, IC = N } # Initial mineral N - nmin0 <- try(ncdf4::ncvar_get(IC.nc, "soil_nitrogen_content"), silent = TRUE) + nmin0 <- try(ncdf4::ncvar_get(IC.nc, "soil_inorganic_nitrogen_content"), silent = TRUE) if (!is.na(nmin0) && is.numeric(nmin0)) { run_params[which(names(run_params) == "NMIN0")] <- PEcAn.utils::ud_convert(nmin0, "kg", "g") } + # Initial organic N + nsom0 <- try(ncdf4::ncvar_get(IC.nc, "soil_organic_nitrogen_content"), silent = TRUE) + if (!is.na(nsom0) && is.numeric(nsom0)) { + run_params[which(names(run_params) == "NSOM")] <- PEcAn.utils::ud_convert(nsom0, "kg", "g") + } + # Rooting depth (m) rootd <- try(ncdf4::ncvar_get(IC.nc, "rooting_depth"), silent = TRUE) if (!is.na(rootd) && is.numeric(rootd)) { @@ -395,6 +614,21 @@ write.config.BASGRA <- function(defaults, trait.values, settings, run.id, IC = N run_params[which(names(run_params) == "TILTOTI")] <- tiltoti } + tilg1 <- try(ncdf4::ncvar_get(IC.nc, "nonelongating_generative_tiller"), silent = TRUE) + if (!is.na(tilg1) && is.numeric(tilg1)) { + run_params[names(run_params) == "TILG1I"] <- tilg1 + } + + tilg2 <- try(ncdf4::ncvar_get(IC.nc, "elongating_generative_tiller"), silent = TRUE) + if (!is.na(tilg2) && is.numeric(tilg2)) { + run_params[names(run_params) == "TILG2I"] <- tilg2 + } + + tilv <- try(ncdf4::ncvar_get(IC.nc, "nonelongating_vegetative_tiller"), silent = TRUE) + if (!is.na(tilv) && is.numeric(tilv)) { + run_params[names(run_params) == "TILVI"] <- tilv + } + # Phenological stage pheni <- try(ncdf4::ncvar_get(IC.nc, "phenological_stage"), silent = TRUE) if (!is.na(pheni) && is.numeric(pheni)) { @@ -440,98 +674,20 @@ write.config.BASGRA <- function(defaults, trait.values, settings, run.id, IC = N run_params[which(names(run_params) == "FWCWP")] <- wcwp / wcst } + yasso_pools <- c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N', 'CSOM_H', 'NSOM', 'TEMPR30', 'PRECIP30') + for (p in yasso_pools) { + value <- try(ncdf4::ncvar_get(IC.nc, p), silent=TRUE) + if (!is.na(value) && is.numeric(value)) { + run_params[names(run_params) == p] <- value + } + } } + - # THESE "PARAMETERS" (IN FACT, INITIAL CONDITIONS) WERE NOT PART OF THE ORIGINAL VECTOR - # 
THESE DERIVATIONS WERE PART OF THE BASGRA CODE, NOW TAKEN OUT HERE BECAUSE OF SDA - # BUT WHEN NOT DOING SDA WE STILL NEED TO PASS THEM - - # NRT = NCR * CRTI - run_params[which(names(run_params) == "NRTI")] <- run_params[names(run_params) == "LOG10CRTI"]* - run_params[names(run_params) == "NCR"] - - # NCSHI = NCSHMAX * (1-EXP(-K*LAII)) / (K*LAII) - # NSH = NCSHI * (CLVI+CSTI) - lai_tmp <- run_params[names(run_params) == "LOG10LAII"] - ncshi <- run_params[names(run_params) == "NCSHMAX"] * - (1-exp(-run_params[names(run_params) == "K"]*lai_tmp)) / (run_params[names(run_params) == "K"]*lai_tmp) - run_params[which(names(run_params) == "NSHI")] <- ncshi * - ((run_params[names(run_params) == "LOG10CLVI"]) + run_params[names(run_params) == "CSTI"]) - - # WAL = 1000. * ROOTDM * WCI - run_params[names(run_params) == "WALI"] <- 1000. * run_params[names(run_params) == "ROOTDM"] * run_params[names(run_params) == "WCI"] - - # O2 = FGAS * ROOTDM * FO2MX * 1000./22.4 - run_params[names(run_params) == "O2I"] <- run_params[names(run_params) == "FGAS"] * - run_params[names(run_params) == "ROOTDM"] * run_params[names(run_params) == "FO2MX"] * 1000./22.4 - - #NLITT = CLITT0 / CNLITT0 - run_params[names(run_params) == "NLITT0"] <- run_params[names(run_params) == "CLITT0"] / run_params[names(run_params) == "CNLITT0"] - - #NSOMF = (CSOM0 * FCSOMF0) / CNSOMF0 - run_params[names(run_params) == "NSOMF0"] <- run_params[names(run_params) == "CSOMF0"] / run_params[names(run_params) == "CNSOMF0"] - run_params[names(run_params) == "NSOMS0"] <- run_params[names(run_params) == "CSOMS0"] / run_params[names(run_params) == "CNSOMS0"] - - - - ################################################################## - ######################### PREVIOUS STATE ######################### - ################################################################## - - # overwrite initial values with previous time steps - # as model2netcdf is developed, some or all of these can be dropped? - last_vals <- c() - last_states_file <- file.path(outdir, "last_vals_basgra.Rdata") - - if(file.exists(last_states_file)){ - - # TODO: certain variables should be thrown into the state matrix in SDA together - # but in case someone forgot to do so, make sure those missing values are passed from where we left off here - - load(last_states_file) - - # SDA handles this now - # PHENI = pa(6) - run_params[names(run_params) == "PHENI"] <- last_vals[names(last_vals) == "PHEN"] - - # LT50I = pa(9) - run_params[names(run_params) == "LT50I"] <- last_vals[names(last_vals) == "LT50"] - - run_params[names(run_params) == "CSTUBI"] <- last_vals[names(last_vals) == "CSTUB"] - - run_params[names(run_params) == "ROOTDM"] <- last_vals[names(last_vals) == "ROOTD"] - - run_params[names(run_params) == "DRYSTORI"] <- last_vals[names(last_vals) == "DRYSTOR"] - run_params[names(run_params) == "FdepthI"] <- last_vals[names(last_vals) == "Fdepth"] - run_params[names(run_params) == "SDEPTHI"] <- last_vals[names(last_vals) == "Sdepth"] - run_params[names(run_params) == "TANAERI"] <- last_vals[names(last_vals) == "TANAER"] - run_params[names(run_params) == "WAPLI"] <- last_vals[names(last_vals) == "WAPL"] - run_params[names(run_params) == "WAPSI"] <- last_vals[names(last_vals) == "WAPS"] - run_params[names(run_params) == "WASI"] <- last_vals[names(last_vals) == "WAS"] - run_params[names(run_params) == "WETSTORI"] <- last_vals[names(last_vals) == "WETSTOR"] - - - run_params[names(run_params) == "FRTILGI"] <- last_vals[names(last_vals) == "FRTILG"] - - #TILV = TILTOTI * (1. 
- FRTILGI) - #TILG1 = TILTOTI * FRTILGI * FRTILGG1I - #TILG2 = TILTOTI * FRTILGI * (1-FRTILGG1I) - - run_params[names(run_params) == "TILVI"] <- run_params[names(run_params) == "TILTOTI"] * (1-run_params[names(run_params) == "FRTILGI"]) - gtil <- run_params[names(run_params) == "TILTOTI"] - run_params[names(run_params) == "TILVI"] - run_params[names(run_params) == "TILG1I"] <- gtil*last_vals[names(last_vals) == "TILG1"] / - (last_vals[names(last_vals) == "TILTOT"] - last_vals[names(last_vals) == "TILV"]) - run_params[names(run_params) == "TILG2I"] <- gtil*last_vals[names(last_vals) == "TILG2"] / - (last_vals[names(last_vals) == "TILTOT"] - last_vals[names(last_vals) == "TILV"]) - - run_params[names(run_params) == "DAYLI"] <- last_vals[names(last_vals) == "DAYL"] - - run_params[names(run_params) == "NMIN0"] <- last_vals[names(last_vals) == "NMIN"] - - run_params[names(run_params) == "WALI"] <- last_vals[names(last_vals) == "WAL"] - run_params[names(run_params) == "WCI"] <- last_vals[names(last_vals) == "WAL"] / (1000 * last_vals[names(last_vals) == "ROOTD"]) - run_params[names(run_params) == "O2I"] <- last_vals[names(last_vals) == "O2"] - + # if the default parameter file is set to force some parameter values, override the trait.values here: + if ('force' %in% colnames(df_run_params)) { + mask <- as.logical(df_run_params$force) + run_params[mask] <- df_run_params$value[mask] } @@ -583,12 +739,16 @@ write.config.BASGRA <- function(defaults, trait.values, settings, run.id, IC = N jobsh <- gsub("@OUTDIR@", outdir, jobsh) jobsh <- gsub("@RUNDIR@", rundir, jobsh) + if (!is.null(settings$run$write.raw.output)) { + jobsh <- gsub("@WRITE_RAW_OUTPUT@", settings$run$write.raw.output, jobsh) + } else { + jobsh <- gsub("@WRITE_RAW_OUTPUT@", FALSE, jobsh) + } jobsh <- gsub( "@RUN_PARAMS@", paste0("c(", PEcAn.utils::listToArgString(run_params), ")"), jobsh) - writeLines(jobsh, con = file.path(settings$rundir, run.id, "job.sh")) Sys.chmod(file.path(settings$rundir, run.id, "job.sh")) diff --git a/models/basgra/R/write_restart.BASGRA.R b/models/basgra/R/write_restart.BASGRA.R index acc430869b7..6b4c8062a7c 100644 --- a/models/basgra/R/write_restart.BASGRA.R +++ b/models/basgra/R/write_restart.BASGRA.R @@ -22,7 +22,7 @@ write_restart.BASGRA <- function(outdir, runid, start.time, stop.time, settings, if ("LAI" %in% variables) { analysis.save[[length(analysis.save) + 1]] <- new.state$LAI - if (new.state$LAI < 0) analysis.save[[length(analysis.save)]] <- 0.00001 + if (new.state$LAI < 0) analysis.save[[length(analysis.save)]] <- 0.0001 names(analysis.save[[length(analysis.save)]]) <- c("LAI") } @@ -38,6 +38,18 @@ write_restart.BASGRA <- function(outdir, runid, start.time, stop.time, settings, names(analysis.save[[length(analysis.save)]]) <- c("slow_soil_pool_carbon_content") } + if ("soil_organic_nitrogen_content" %in% variables) { + analysis.save[[length(analysis.save) + 1]] <- new.state$soil_nitrogen_content + if (new.state$soil_nitrogen_content < 0) analysis.save[[length(analysis.save)]] <- 0 + names(analysis.save[[length(analysis.save)]]) <- c("soil_nitrogen_content") + } + + if ("TotSoilCarb" %in% variables) { + analysis.save[[length(analysis.save) + 1]] <- new.state$TotSoilCarb + if (new.state$TotSoilCarb < 0) analysis.save[[length(analysis.save)]] <- 0 + names(analysis.save[[length(analysis.save)]]) <- c("TotSoilCarb") + } + if ("CropYield" %in% variables) { analysis.save[[length(analysis.save) + 1]] <- new.state$CropYield if (new.state$CropYield < 0) analysis.save[[length(analysis.save)]] <- 0 @@ -64,19 
+76,19 @@ write_restart.BASGRA <- function(outdir, runid, start.time, stop.time, settings, if ("root_carbon_content" %in% variables) { analysis.save[[length(analysis.save) + 1]] <- new.state$root_carbon_content - if (new.state$root_carbon_content < 0) analysis.save[[length(analysis.save)]] <- 0 + if (new.state$root_carbon_content < 0) analysis.save[[length(analysis.save)]] <- 0.0001 names(analysis.save[[length(analysis.save)]]) <- c("root_carbon_content") } if ("reserve_carbon_content" %in% variables) { analysis.save[[length(analysis.save) + 1]] <- new.state$reserve_carbon_content - if (new.state$reserve_carbon_content < 0) analysis.save[[length(analysis.save)]] <- 0 + if (new.state$reserve_carbon_content < 0) analysis.save[[length(analysis.save)]] <- 1e-05 names(analysis.save[[length(analysis.save)]]) <- c("reserve_carbon_content") } if ("leaf_carbon_content" %in% variables) { analysis.save[[length(analysis.save) + 1]] <- new.state$leaf_carbon_content - if (new.state$leaf_carbon_content < 0) analysis.save[[length(analysis.save)]] <- 0 + if (new.state$leaf_carbon_content < 0) analysis.save[[length(analysis.save)]] <- 0.001 names(analysis.save[[length(analysis.save)]]) <- c("leaf_carbon_content") } @@ -112,10 +124,18 @@ write_restart.BASGRA <- function(outdir, runid, start.time, stop.time, settings, if ("phenological_stage" %in% variables) { analysis.save[[length(analysis.save) + 1]] <- new.state$phenological_stage - if (new.state$phenological_stage < 0) analysis.save[[length(analysis.save)]] <- 0.01 + if (new.state$phenological_stage < 0) analysis.save[[length(analysis.save)]] <- 0 + if (new.state$phenological_stage > 1) analysis.save[[length(analysis.save)]] <- 1 names(analysis.save[[length(analysis.save)]]) <- c("phenological_stage") } + if ("SoilMoistFrac" %in% variables) { + analysis.save[[length(analysis.save) + 1]] <- new.state$SoilMoistFrac + if (new.state$SoilMoistFrac < 0) analysis.save[[length(analysis.save)]] <- 0.001 + if (new.state$SoilMoistFrac > 1) analysis.save[[length(analysis.save)]] <- 1 + names(analysis.save[[length(analysis.save)]]) <- c("SoilMoistFrac") + } + if (!is.null(analysis.save) && length(analysis.save) > 0){ analysis.save.mat <- data.frame(matrix(unlist(analysis.save, use.names = TRUE), nrow = 1)) colnames(analysis.save.mat) <- names(unlist(analysis.save)) diff --git a/models/basgra/inst/BASGRA_params.Rdata b/models/basgra/inst/BASGRA_params.Rdata deleted file mode 100644 index 003a9b25395..00000000000 Binary files a/models/basgra/inst/BASGRA_params.Rdata and /dev/null differ diff --git a/models/basgra/inst/BASGRA_params.csv b/models/basgra/inst/BASGRA_params.csv new file mode 100644 index 00000000000..7cd07db89c7 --- /dev/null +++ b/models/basgra/inst/BASGRA_params.csv @@ -0,0 +1,161 @@ +"","default_params" +"LOG10CLVI",1 +"LOG10CRESI",1.2 +"LOG10CRTI",1.2 +"CSTI",0 +"LOG10LAII",0.1 +"PHENI",0.01 +"TILTOTI",1000 +"FRTILGI",0.02 +"LT50I",-4.7894 +"CLAIV",1 +"COCRESMX",0.08 +"CSTAVM",0.229575 +"DAYLB",0.539664 +"DAYLP",0.632348 +"DLMXGE",0.836974 +"FSLAMIN",0.75 +"FSMAX",0.893 +"HAGERE",0.2 +"K",0.4669 +"LAICR",3.79569 +"LAIEFT",0.2 +"LAITIL",0.566455 +"LFWIDG",0.0085207 +"LFWIDV",0.00491875 +"NELLVM",2.09178 +"PHENCR",0.495209 +"PHY",45.09 +"RDRSCO",0.0712247 +"RDRSMX",0.06 +"RDRTEM",0.00102573 +"RGENMX",0.0108797 +"ROOTDM",0.95 +"RRDMAX",0.012 +"RUBISC",3.7803 +"SHAPE",0.538907 +"SIMAX1T",0.005471 +"SLAMAX",0.0781 +"TBASE",2.766 +"TCRES",2.639 +"TOPTGE",21.37 +"TRANCO",6.435 +"YG",0.8759 +"LAT",58 +"WCI",0.4 +"FWCAD",0.011363636 +"FWCWP",0.35 +"FWCFC",0.75 
+"FWCWET",1 +"WCST",0.44 +"WpoolMax",50 +"Dparam",0.003 +"FGAS",0.4175 +"FO2MX",0.2008 +"gamma",68.86 +"Hparam",0.02 +"KRDRANAER",0.6396 +"KRESPHARD",0.02244 +"KRSR3H",0.9618 +"KRTOTAER",1.775 +"KSNOW",0.03871 +"LAMBDAsoil",199000 +"LDT50A",1.555 +"LDT50B",-2.172 +"LT50MN",-25.47 +"LT50MX",-4.544 +"RATEDMX",1.81 +"reHardRedDay",133.7 +"RHOnewSnow",85.86 +"RHOpack",0.02187 +"SWret",0.08415 +"SWrf",0.0104 +"THARDMX",20 +"TmeltFreeze",0 +"TrainSnow",0.01062 +"TsurfDiff",3.353 +"KLUETILG",0.5787 +"FRTILGG1I",0.5648 +"DAYLG1G2",0.6302 +"RGRTG1G2",0.8237 +"RDRTMIN",0.01209 +"TVERN",20 +"CLITT0",300 +"CSOM0",16000 +"CNLITT0",50 +"CNSOMF0",15 +"CNSOMS0",25 +"FCSOMF0",0.3 +"FLITTSOMF",0.6 +"FSOMFSOMS",0.03 +"RNLEACH",0.5 +"KNEMIT",5e-04 +"NMIN0",2 +"TCLITT",365 +"TCSOMF",1460 +"TCSOMS",73000 +"TMAXF",50 +"TSIGMAF",20 +"RFN2O",8.5 +"WFPS50N2O",0.7 +"NCSHMAX",0.06934 +"NCR",0.02195 +"RDRROOT",0.02 +"RDRSTUB",0.2086 +"NFERTMULT",1 +"FNCGSHMIN",0.327 +"TCNSHMOB",7.643 +"TCNUPT",27.6 +"F_DIGEST_WALL_FMIN",0.7 +"F_DIGEST_WALL_MAX",0.9 +"F_WALL_LV_FMIN",0.7 +"F_WALL_LV_MAX",0.6 +"F_WALL_ST_FMIN",0.8 +"F_WALL_ST_MAX",0.75 +"CLVDI",0 +"YIELDI",0 +"CSTUBI",0 +"DRYSTORI",0 +"FdepthI",0 +"SDEPTHI",0 +"TANAERI",0 +"WAPLI",0 +"WAPSI",0 +"WASI",0 +"WETSTORI",0 +"NRTI",0.0263 +"NSHI",0.0677 +"TILG1I",11 +"TILG2I",9 +"TILVI",980 +"WALI",380 +"O2I",3.5555 +"CSOMF0",4800 +"CSOMS0",11200 +"NLITT0",6 +"NSOMF0",320 +"NSOMS0",448 +"use_yasso",0 +"use_nitrogen",1 +"cn_h_min",10 +"totc_init",16000 +"fract_legacy_c",0.0 +"hist_mean_tempr",5.0 +"hist_yearly_precip",600 +"hist_tempr_ampl",10 +"yasso_alpha_a",0.51 +"yasso_alpha_w",5.19 +"yasso_alpha_e",0.13 +"yasso_alpha_n",0.10 +"yasso_beta_1",0.158 +"yasso_beta_2",-0.002 +"yasso_rate_pc",0.0 +"yasso_tresp_pc",0.0 +"CSOM_A",-10.0 +"CSOM_W",-10.0 +"CSOM_E",-10.0 +"CSOM_N",-10.0 +"CSOM_H",-10.0 +"NSOM",-10.0 +"TEMPR30",5.0 +"PRECIP30",1.5 diff --git a/models/basgra/inst/last_vals_basgra.Rdata b/models/basgra/inst/last_vals_basgra.Rdata new file mode 100644 index 00000000000..87fc3e48f1b Binary files /dev/null and b/models/basgra/inst/last_vals_basgra.Rdata differ diff --git a/models/basgra/inst/template.job b/models/basgra/inst/template.job index 288a142745d..28bdba46a62 100644 --- a/models/basgra/inst/template.job +++ b/models/basgra/inst/template.job @@ -15,7 +15,7 @@ if [ ! -e "@OUTDIR@/results.csv" ]; then # convert to MsTMIP echo "library (PEcAn.BASGRA) -run_BASGRA('@SITE_MET@', @RUN_PARAMS@, '@SITE_HARVEST@', '@SITE_FERTILIZE@', '@START_DATE@', '@END_DATE@', '@OUTDIR@', @SITE_LAT@, @SITE_LON@, '@SITE_CO2FILE@') +run_BASGRA('@SITE_MET@', @RUN_PARAMS@, '@SITE_HARVEST@', '@SITE_FERTILIZE@', '@START_DATE@', '@END_DATE@', '@OUTDIR@', @SITE_LAT@, @SITE_LON@, '@SITE_CO2FILE@', @WRITE_RAW_OUTPUT@) " | R --vanilla STATUS=$? diff --git a/models/basgra/man/run_BASGRA.Rd b/models/basgra/man/run_BASGRA.Rd index 8cbe14b3760..8fc469087a9 100644 --- a/models/basgra/man/run_BASGRA.Rd +++ b/models/basgra/man/run_BASGRA.Rd @@ -14,7 +14,8 @@ run_BASGRA( outdir, sitelat, sitelon, - co2_file = NULL + co2_file = NULL, + write_raw_output = FALSE ) } \arguments{ @@ -37,6 +38,8 @@ run_BASGRA( \item{sitelon}{longitude of the site} \item{co2_file}{path to daily atmospheric CO2 concentration file, optional, defaults to 350 ppm when missing} + +\item{write_raw_output}{write raw output in csv or not} } \description{ BASGRA wrapper function. Runs and writes model outputs in PEcAn standard. 
@@ -49,5 +52,5 @@ write.config.BASGRA modifies the arguments of this function through template.job; job.sh then calls this function to run the model } \author{ -Istem Fer +Istem Fer, Julius Vira } diff --git a/models/basgra/src/BASGRA.f90 b/models/basgra/src/BASGRA.f90 index 88804c13ee4..7c935c56998 100644 --- a/models/basgra/src/BASGRA.f90 +++ b/models/basgra/src/BASGRA.f90 @@ -1,7 +1,8 @@ -subroutine BASGRA( PARAMS, MATRIX_WEATHER, & - CALENDAR_FERT, CALENDAR_NDEP, DAYS_HARVEST, & - NDAYS, NOUT, & - y) +subroutine BASGRA(PARAMS, MATRIX_WEATHER, & + CALENDAR_FERT, CALENDAR_NDEP, DAYS_HARVEST, & + HARVEST_PARAMS, & + NPARAMS, NDAYS, NOUT, & + y) !------------------------------------------------------------------------------- ! This is the BASic GRAss model originally written in MATLAB/Simulink by Marcel ! van Oijen, Mats Hoglind, Stig Morten Thorsen and Ad Schapendonk. @@ -21,22 +22,29 @@ subroutine BASGRA( PARAMS, MATRIX_WEATHER, & use resources use soil use plant +use yasso +use set_params_mod implicit none - -integer, dimension(300,3) :: DAYS_HARVEST -real :: PARAMS(140) #ifdef weathergen integer, parameter :: NWEATHER = 7 #else integer, parameter :: NWEATHER = 9 #endif -real :: MATRIX_WEATHER(NMAXDAYS,NWEATHER) -real , dimension(300,3) :: CALENDAR_FERT, CALENDAR_NDEP +real, intent(in) :: PARAMS(NPARAMS) +real, intent(in) :: MATRIX_WEATHER(NMAXDAYS,NWEATHER) +real, intent(in) :: CALENDAR_FERT(300, 6) +real, intent(in) :: CALENDAR_NDEP(300, 3) +integer, intent(in) :: DAYS_HARVEST(300, 2) +real, intent(in) :: HARVEST_PARAMS(300, 2) ! (day, 1=CLAIV, 2={if > 0, cut only}) +integer, intent(in) :: NPARAMS, NDAYS, NOUT +real, intent(out) :: y(NDAYS,NOUT) + + integer, dimension(300,2) :: DAYS_FERT , DAYS_NDEP -real , dimension(300) :: NFERTV , NDEPV +real , dimension(300,4) :: NFERTV ! (day,[mineral-N, organic-N, soluble-C, compost-C]) +real , dimension(300) :: NDEPV -integer :: day, doy, i, NDAYS, NOUT, year -real :: y(NDAYS,NOUT) +integer :: day, doy, i, year ! State variables plants real :: CLV, CLVD, CRES, CRT, CST, CSTUB, LAI, LT50, PHEN @@ -63,12 +71,34 @@ subroutine BASGRA( PARAMS, MATRIX_WEATHER, & real :: O2OUT, PackMelt, poolDrain, poolInfil, Psnow, reFreeze, RESMOB real :: RGRTVG1, RROOTD, SnowMelt, THAWPS, THAWS, TILVG1, TILG1G2, TRAN, Wremain real :: NCSHI, NCGSH, NCDSH, NCHARVSH, GNSH, DNSH, HARVNSH, GNRT, DNRT +real :: ALLOTOT, GRESSI real :: NSHmob, NSHmobsoil, Nupt - +real :: input_soluble_c, input_compost_c ! from organic amendments/fertilizers +real :: input_org_n +real :: nupt_max, nupt_max_adj real :: Ndep, Nfert real :: F_DIGEST_DM, F_DIGEST_DMSH, F_DIGEST_LV, F_DIGEST_ST, F_DIGEST_WALL real :: F_WALL_DM , F_WALL_DMSH , F_WALL_LV , F_WALL_ST +logical :: if_cut_only ! add harvested biomass to litter, not yield fluxes +real :: harv_c_to_litt, harv_n_to_litt +real :: harv_c_exported +! yasso +real :: yasso_cstate(statesize_yasso) +real :: yasso_ctend(statesize_yasso) +real :: runoff_cstate(statesize_yasso) +real :: yasso_nstate +real :: yasso_ntend +real :: yasso_met_state(2, 31) ! for calculating 30-day averages of tempr & precip +real :: yasso_met(2) ! 30-day rolling tempr, precip +integer :: yasso_met_ind ! counter for averaging the met variables +real :: cflux_to_yasso(statesize_yasso) +real :: yasso_param(num_params_y20) +real :: org_n_to_yasso + +if (NOUT < 118) then + call rexit('NOUT too small: at least 118 output columns are required') +end if ! Parameters call set_params(PARAMS) @@ -92,7 +122,8 @@ subroutine BASGRA( PARAMS, MATRIX_WEATHER, & ! 
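On the R side this maps onto a ten-argument .Fortran call, matching the updated registration in init.c further below. A hedged sketch with illustrative sizes only — the real call is made inside run_BASGRA, and MATRIX_WEATHER must match the compiled NMAXDAYS x NWEATHER shape:

matrix_weather <- matrix(0, nrow = 10000, ncol = 9)  # placeholder dims; must equal NMAXDAYS x NWEATHER
ndays <- 365L; nout <- 118L                          # NOUT must be >= 118
out <- .Fortran("basgra",
                PARAMS         = as.numeric(run_params),  # length >= 160
                MATRIX_WEATHER = matrix_weather,
                CALENDAR_FERT  = matrix(0, 300, 6),
                CALENDAR_NDEP  = matrix(0, 300, 3),
                DAYS_HARVEST   = matrix(0L, 300, 2),
                HARVEST_PARAMS = matrix(0, 300, 2),
                NPARAMS = length(run_params), NDAYS = ndays, NOUT = nout,
                y = matrix(0, ndays, nout),
                PACKAGE = "PEcAn.BASGRA")
y <- out$y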
Calendars DAYS_FERT = CALENDAR_FERT (:,1:2) DAYS_NDEP = CALENDAR_NDEP (:,1:2) -NFERTV = CALENDAR_FERT (:,3) * NFERTMULT +NFERTV = CALENDAR_FERT (:,3:6) * NFERTMULT + NDEPV = CALENDAR_NDEP (:,3) ! Initial constants for plant state variables @@ -120,10 +151,54 @@ subroutine BASGRA( PARAMS, MATRIX_WEATHER, & Nfert_TOT = 0 DM_MAX = 0 -! Initial constants for soil state variables -CLITT = CLITT0 -CSOMF = CSOMF0 -CSOMS = CSOMS0 +if (use_yasso) then + ! Yasso currently requires DELT = 1 + if (abs(DELT - 1.0) > 1e-6) then + call rexit('Yasso soil model requires DELT = 1') + end if + yasso_met_ind = 1 + call get_params(param_y20_map, yasso_alpha_awen, yasso_beta12, yasso_decomp_pc, yasso_param) + ! call initialize(& + ! yasso_param, & + ! 0.3 * hist_carbon_input / 365.0, & + ! 0.7 * hist_carbon_input / 365.0, & + ! hist_carbon_input * 0.02 / 365.0, & ! C:N 50 for carbon input + ! hist_mean_tempr, hist_yearly_precip / 365.0, hist_tempr_ampl, & + ! totc_min_init, & + ! yasso_cstate, & + ! yasso_nstate) + if (sum(yasso_cstate_init) > 0.0) then + yasso_cstate = yasso_cstate_init + if (.not. yasso_nstate_init > 0.0) then + call rexit('yasso initial nstate is zero but cstate is not') + end if + yasso_nstate = yasso_nstate_init + else + call initialize_totc(& + yasso_param, & + totc_init, & + cn_input = 50.0, & + fract_root_input = 0.7, & + fract_legacy_soc = fract_legacy_c, & + tempr_c = hist_mean_tempr, & + precip_day = hist_yearly_precip / 365.0, & + tempr_ampl = hist_tempr_ampl, & + cstate = yasso_cstate, nstate = yasso_nstate) + end if +else + ! Initial constants for soil state variables + CLITT = CLITT0 + CSOMF = CSOMF0 + CSOMS = CSOMS0 + NLITT = NLITT0 + NSOMF = NSOMF0 + NSOMS = NSOMS0 + ! missing values for yasso variables + yasso_cstate = -1e35 + yasso_nstate = -1e35 + yasso_met = -1e35 +end if + DRYSTOR = DRYSTORI Fdepth = FdepthI NLITT = NLITT0 @@ -140,7 +215,6 @@ subroutine BASGRA( PARAMS, MATRIX_WEATHER, & WETSTOR = WETSTORI do day = 1, NDAYS - ! Environment call set_weather_day(day,DRYSTOR, year,doy) call DDAYL (doy) @@ -162,40 +236,104 @@ subroutine BASGRA( PARAMS, MATRIX_WEATHER, & call FRDRUNIR (EVAP,Fdepth,Frate,INFIL,poolDRAIN,ROOTD,TRAN,WAL,WAS, & FREEZEL,IRRIG,THAWS) call O2status (O2,ROOTD) + ! 
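The fertilisation calendar thus carries four amount columns instead of one, each scaled by NFERTMULT. A small illustration of the expected 300 x 6 layout on the R side (one mineral-N application; column names follow the test fixtures further below):

calendar_fert <- matrix(0, nrow = 300, ncol = 6,
                        dimnames = list(NULL, c("year", "doy", "Nmin", "Norg",
                                                "C_soluble", "C_compost")))
calendar_fert[1, ] <- c(2019, 128, 10.0, 0, 0, 0)  # mineral N only, on day 128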
Plant - call Harvest (CLV,CRES,CST,year,doy,DAYS_HARVEST,LAI,PHEN,TILG1,TILG2,TILV, & - GSTUB,HARVLA,HARVLV,HARVLVP,HARVPH,HARVRE,HARVREP,HARVST,HARVSTP,HARVTILG2) - + call Harvest (CLV,CRES,CST,year,doy,DAYS_HARVEST,HARVEST_PARAMS, LAI,PHEN,TILG1,TILG2,TILV, & + GSTUB,HARVLA,HARVLV,HARVLVP,HARVPH,HARVRE,HARVREP,HARVST,HARVSTP,HARVTILG2,if_cut_only) + if (if_cut_only) then + harv_c_to_litt = HARVLV + HARVST*HAGERE + HARVRE + harv_n_to_litt = HARVNSH + else + harv_c_to_litt = 0.0 + harv_n_to_litt = 0.0 + end if - CLV = CLV - HARVLV - CRES = CRES - HARVRE - CST = CST - HARVST - LAI = LAI - HARVLA - PHEN = min(1., PHEN - HARVPH) - TILG2 = TILG2 - HARVTILG2 - NSH = NSH - HARVNSH call Biomass (CLV,CRES,CST) call Phenology (DAYL,PHEN, DPHEN,GPHEN) call Foliage1 call LUECO2TM (PARAV) call HardeningSink (CLV,DAYL,doy,LT50,Tsurf) - call Growth (LAI,NSH,NMIN,CLV,CRES,CST,PARINT,TILG1,TILG2,TILV,TRANRF, & - GLV,GRES,GRT,GST,RESMOB,NSHmob) - call PlantRespiration(FO2,RESPHARD) + call Growth(LAI, NSH, NMIN, CLV, CRES, CST,& + PARINT,TILG1,TILG2,TILV,TRANRF, & + RESMOB, NSINK, NSHmob, nupt_max, & + ALLOTOT, GRESSI) + call Senescence (CLV,CRT,CSTUB,LAI,LT50,PERMgas,TANAER,TILV,Tsurf, & - DeHardRate,DLAI,DLV,DRT,DSTUB,dTANAER,DTILV,HardRate) + DeHardRate,DLAI,DLV,DRT,DSTUB,dTANAER,DTILV,HardRate) + + call N_fert (year,doy,DAYS_FERT,NFERTV, Nfert, input_soluble_c, input_compost_c, input_org_n) + + if (use_yasso) then + if (use_met_ema) then + if (day == 1) then + yasso_met(1:2) = yasso_met_init(1:2) + end if + call average_met_ema((/DAVTMP, RAIN/), yasso_met) + else + call average_met((/DAVTMP, RAIN/), yasso_met, 30, yasso_met_state, yasso_met_ind) + end if + call decompose(& + yasso_param, & + DELT, & ! timestep +! cflux_to_yasso, & ! segregated by the AWENH fraction +! org_n_to_yasso, & ! total organic N input + yasso_met(1), &! 30-day temperature + yasso_met(2), &! 30-day precip, + yasso_cstate, & + yasso_nstate, & + yasso_ctend, & + yasso_ntend) + + ! The nitrogen-uptake-related fluxes: + ! NSINK = how much N the plant can potentially use for growth + ! NSHmob = how much N can be mobilized internally + ! NSOURCE_ADJ = how much N the plant can receive after accounting for microbial N immobilization + ! NSOURCE = how much N the plant is able to use, from all sources + call adjust_nmin_fluxes(& + use_yasso, & + NMIN, & + nupt_max, & + yasso_ntend, & + nupt_max_adj, & + nmin_immob_yasso = Nmineralisation) + call Allocation(& + use_nitrogen, ALLOTOT, GRESSI, & + nupt_max_adj + NSHmob, & ! NSOURCE + NSINK, GRES, GRT, GLV, GST) + call CNSoil_stub(ROOTD, RWA, WFPS, WAL, GRT, yasso_cstate, yasso_nstate, NMIN, runoff_cstate) + Rsoil = -(sum(yasso_ctend)) + + else + call adjust_nmin_fluxes(& + use_yasso, & + NMIN, & + nupt_max, & + yasso_ntend, & + nupt_max_adj = nupt_max_adj) + call Allocation(& + use_nitrogen, ALLOTOT, GRESSI, & + nupt_max_adj + NSHmob, & ! NSOURCE + NSINK, GRES, GRT, GLV, GST) + ! The inputs are zeroed because they are currently ignored when run without YASSO. + input_soluble_c = 0.0 + input_compost_c = 0.0 + call CNsoil (ROOTD,RWA,WFPS,WAL,GRT,CLITT,CSOMF,NLITT,NSOMF,NSOMS,NMIN,CSOMS) + end if + + call PlantRespiration(FO2,RESPHARD) ! must be after allocation + call Foliage2 (DAYL,GLV,LAI,TILV,TILG1,TRANRF,Tsurf,VERN, & GLAI,GTILV,TILVG1,TILG1G2) ! 
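A compact R transcription of the new cut-only routing above: when the cut is left on the field, the harvested shoot carbon and nitrogen are credited to the litter input of the soil model instead of the yield fluxes.

route_harvest <- function(if_cut_only, HARVLV, HARVST, HARVRE, HARVNSH, HAGERE) {
  if (if_cut_only) {
    list(harv_c_to_litt = HARVLV + HARVST * HAGERE + HARVRE,  # shoot C to litter
         harv_n_to_litt = HARVNSH)                            # shoot N to litter
  } else {
    list(harv_c_to_litt = 0, harv_n_to_litt = 0)              # exported as yield
  }
}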
Soil 2 call O2fluxes (O2,PERMgas,ROOTD,RplantAer, O2IN,O2OUT) - call N_fert (year,doy,DAYS_FERT,NFERTV, Nfert) call N_dep (year,doy,DAYS_NDEP,NDEPV, Ndep) - call CNsoil (ROOTD,RWA,WFPS,WAL,GRT,CLITT,CSOMF,NLITT,NSOMF,NSOMS,NMIN,CSOMS) + + call Nplant (NSHmob,CLV,CRT,CST,DLAI,DLV,DRT,GLAI,GLV,GRT,GST, & HARVLA,HARVLV,HARVST,LAI,NRT,NSH, & DNRT,DNSH,GNRT,GNSH,HARVNSH, & - NCDSH,NCGSH,NCHARVSH,NSHmobsoil,Nupt) + NCDSH,NCGSH,NCHARVSH,NSHmobsoil,Nupt) ! Extra variables DMLV = CLV / 0.45 ! Leaf dry matter; g m-2 @@ -234,7 +372,13 @@ subroutine BASGRA( PARAMS, MATRIX_WEATHER, & if((LAT>0).AND.(doy==305)) VERN = 0 if((LAT<0).AND.(doy==122)) VERN = 0 if(DAVTMP0) YIELD_LAST = YIELD YIELD_TOT = YIELD_TOT + YIELD @@ -246,19 +390,45 @@ subroutine BASGRA( PARAMS, MATRIX_WEATHER, & Nfert_TOT = Nfert_TOT + Nfert DM_MAX = max( DM, DM_MAX ) + + CLV = CLV - HARVLV + CRES = CRES - HARVRE + CST = CST - HARVST + LAI = LAI - HARVLA + PHEN = min(1., PHEN - HARVPH) + TILG2 = TILG2 - HARVTILG2 + NSH = NSH - HARVNSH + + + ! State equations soil + if (use_yasso) then + call inputs_to_fractions(& + leaf = DSTUB + DLV + harv_c_to_litt, & + root = DRT, & + soluble = input_soluble_c, & + compost = input_compost_c, & + fract = cflux_to_yasso) + org_n_to_yasso = DNSH + DNRT + input_org_n + harv_n_to_litt + yasso_cstate = yasso_cstate + yasso_ctend + cflux_to_yasso - runoff_cstate + yasso_nstate = yasso_nstate + yasso_ntend + org_n_to_yasso - rNSOMF + else + CLITT = CLITT + DLV + DSTUB + harv_c_to_litt - rCLITT - dCLITT + CSOMF = CSOMF + DRT + dCLITTsomf - rCSOMF - dCSOMF + CSOMS = CSOMS + dCSOMFsoms - dCSOMS + NLITT = NLITT + DNSH + harv_n_to_litt - rNLITT - dNLITT + NSOMF = NSOMF + DNRT + NLITTsomf - rNSOMF - dNSOMF + NSOMS = NSOMS + NSOMFsoms - dNSOMS + end if -! State equations soil - CLITT = CLITT + DLV + DSTUB - rCLITT - dCLITT - CSOMF = CSOMF + DRT + dCLITTsomf - rCSOMF - dCSOMF - CSOMS = CSOMS + dCSOMFsoms - dCSOMS DRYSTOR = DRYSTOR + reFreeze + Psnow - SnowMelt Fdepth = Fdepth + Frate - NLITT = NLITT + DNSH - rNLITT - dNLITT - NSOMF = NSOMF + DNRT + NLITTsomf - rNSOMF - dNSOMF - NSOMS = NSOMS + NSOMFsoms - dNSOMS - NMIN = NMIN + Ndep + Nfert + Nmineralisation + Nfixation + NSHmobsoil & - - Nupt - Nleaching - Nemission - NMIN = max(0.,NMIN) + if (.not. use_nitrogen) then + NMIN = -1e35 + else + NMIN = NMIN + Ndep + Nfert + Nmineralisation + Nfixation + NSHmobsoil & + - Nupt - Nleaching - Nemission + NMIN = max(0.,NMIN) + end if O2 = O2 + O2IN - O2OUT Sdepth = Sdepth + Psnow/RHOnewSnow - PackMelt TANAER = TANAER + dTANAER @@ -389,6 +559,20 @@ subroutine BASGRA( PARAMS, MATRIX_WEATHER, & y(day,103) = EVAP y(day,104) = TRAN + y(day,105) = DLV+DSTUB ! FLITTC_LEAF + y(day,106) = DRT ! FLITTC_ROOT + + y(day,107) = Rsoil - GRT - GST - GLV - GRES + RESMOB ! NEE + y(day,108) = harv_c_exported + y(day,109) = rCLITT + rCSOMF ! C RUNOFF + + ! yasso outputs + y(day,110:114) = yasso_cstate + y(day,115) = yasso_nstate + y(day,116:117) = yasso_met + + y(day,118) = input_soluble_c + input_compost_c ! 
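The soil-state bookkeeping above reduces to a short balance in the YASSO branch: each pool advances by its tendency plus fresh inputs minus runoff losses. A minimal R sketch with names mirroring the Fortran:

update_yasso_state <- function(cstate, nstate, ctend, ntend,
                               cflux_in, org_n_in, runoff_cstate, rNSOMF) {
  list(cstate = cstate + ctend + cflux_in - runoff_cstate,  # AWENH carbon pools
       nstate = nstate + ntend + org_n_in - rNSOMF)         # single organic-N pool
}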
C added in soil amendments + enddo -end \ No newline at end of file +end diff --git a/models/basgra/src/Makevars b/models/basgra/src/Makevars index 84a911ee62b..741ee8de74b 100644 --- a/models/basgra/src/Makevars +++ b/models/basgra/src/Makevars @@ -9,7 +9,8 @@ PKG_FCFLAGS = -x f95-cpp-input -fdefault-real-8 PARAMS = \ parameters_site.o \ - parameters_plant.o + parameters_plant.o \ + yasso.o NEEDS_PARAMS = \ environment.o \ diff --git a/models/basgra/src/init.c b/models/basgra/src/init.c index 03f9965bb0f..8324d96c7b1 100644 --- a/models/basgra/src/init.c +++ b/models/basgra/src/init.c @@ -6,7 +6,7 @@ extern void F77_NAME(basgra)(void *, void *, void *, void *, void *, void *, void *, void *); static const R_FortranMethodDef FortranEntries[] = { - {"basgra", (DL_FUNC) &F77_NAME(basgra), 8}, + {"basgra", (DL_FUNC) &F77_NAME(basgra), 10}, {NULL, NULL, 0} }; diff --git a/models/basgra/src/parameters_site.f90 b/models/basgra/src/parameters_site.f90 index 512383ea63c..d7fb09a07b4 100644 --- a/models/basgra/src/parameters_site.f90 +++ b/models/basgra/src/parameters_site.f90 @@ -59,5 +59,22 @@ module parameters_site ! SA parameters real :: NFERTMULT +! Yasso +logical :: use_yasso +logical :: use_nitrogen +real :: hist_carbon_input +real :: fract_legacy_c +real :: hist_mean_tempr +real :: hist_yearly_precip +real :: hist_tempr_ampl +real :: totc_init ! used in initialization +real :: yasso_alpha_awen(4) ! base decomposition rates for the faster-cycling pools +real :: yasso_beta12(2) ! temperature sensitivity parameters +! principal component parametrization for the decomposition rates and temperature sensitivity +real :: yasso_decomp_pc(2) +! initial YASSO C & N state +real :: yasso_cstate_init(5) +real :: yasso_nstate_init +real :: yasso_met_init(2) end module parameters_site diff --git a/models/basgra/src/plant.f90 b/models/basgra/src/plant.f90 index 9b571b918ac..d6bdf05d71d 100644 --- a/models/basgra/src/plant.f90 +++ b/models/basgra/src/plant.f90 @@ -16,14 +16,17 @@ module plant Contains -Subroutine Harvest(CLV,CRES,CST,year,doy,DAYS_HARVEST,LAI,PHEN,TILG1,TILG2,TILV, & - GSTUB,HARVLA,HARVLV,HARVLVP,HARVPH,HARVRE,HARVREP,HARVST,HARVSTP,HARVTILG2) - integer :: doy,year - integer,dimension(300,3) :: DAYS_HARVEST - real :: CLV, CRES, CST, LAI, PHEN, TILG1, TILG2, TILV - real :: GSTUB, HARVLV, HARVLVP, HARVLA, HARVRE, HARVREP, HARVTILG2, HARVST, HARVSTP, HARVPH +Subroutine Harvest(CLV,CRES,CST,year,doy,DAYS_HARVEST,HARVEST_PARAMS, LAI,PHEN,TILG1,TILG2,TILV, & + GSTUB,HARVLA,HARVLV,HARVLVP,HARVPH,HARVRE,HARVREP,HARVST,HARVSTP,HARVTILG2, if_cut_only) + integer, intent(in) :: doy,year + integer, intent(in), dimension(300,2) :: DAYS_HARVEST + real, intent(in), dimension(300, 2) :: HARVEST_PARAMS + real, intent(in) :: CLV, CRES, CST, LAI, PHEN, TILG1, TILG2, TILV + real, intent(out) :: GSTUB, HARVLV, HARVLVP, HARVLA, HARVRE, HARVREP, HARVTILG2, HARVST, HARVSTP, HARVPH + logical, intent(out) :: if_cut_only ! if the cut grass is left on the field and not to be added to the yield pool + real :: CLAIV, CLAI, HARVFR, TV1 - integer :: HARV, HARVP, TEMPOP, i + integer :: HARV, HARVP, i HARVP = 1 HARV = 0 @@ -33,9 +36,10 @@ Subroutine Harvest(CLV,CRES,CST,year,doy,DAYS_HARVEST,LAI,PHEN,TILG1,TILG2,TILV, if ( (year==DAYS_HARVEST(i,1)) .and. 
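With the output matrix widened, the new diagnostics sit at fixed column indices. Given a returned NDAYS x NOUT matrix y (NOUT >= 118, as enforced above), they can be pulled out as:

litter_c  <- y[, 105:106]  # FLITTC_LEAF, FLITTC_ROOT
nee       <- y[, 107]      # net ecosystem exchange
harv_c    <- y[, 108]      # exported harvest C
c_runoff  <- y[, 109]
yasso_c   <- y[, 110:114]  # AWENH pools
yasso_n   <- y[, 115]
yasso_met <- y[, 116:117]  # rolling temperature and precipitation
amend_c   <- y[, 118]      # C added in soil amendments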
(doy==DAYS_HARVEST(i,2)) ) then HARV = 1 NOHARV = 0 - TEMPOP = DAYS_HARVEST(i,3) - CLAIV = TEMPOP * 0.1 - end if + CLAIV = HARVEST_PARAMS(i,1) + if_cut_only = HARVEST_PARAMS(i,2) > 0.0 + exit + end if end do FRACTV = (TILV+TILG1) / (TILV+TILG1+TILG2) CLAI = FRACTV * CLAIV @@ -137,12 +141,20 @@ Subroutine HardeningSink(CLV,DAYL,doy,LT50,Tsurf) RESPHARDSI = RATEH * CLV * KRESPHARD * max(0.,min(1., RESNOR*5. )) end Subroutine HardeningSink -Subroutine Growth(LAI,NSH,NMIN,CLV,CRES,CST,PARINT,TILG1,TILG2,TILV,TRANRF, & - GLV,GRES,GRT,GST,RESMOB,NSHmob) - real :: LAI,NSH,NMIN,CLV,CRES,CST,PARINT,TILG1,TILG2,TILV,TRANRF - real :: GLV,GRES,GRT,GST,RESMOB,NSHmob - real :: ALLOTOT,CSTAV,GRESSI,SOURCE +Subroutine Growth(LAI,NSH,NMIN,CLV,CRES,CST,PARINT,TILG1,TILG2,TILV, TRANRF, & + RESMOB, NSINK, NSHmob, nupt_max, ALLOTOT, GRESSI) + real, intent(in) :: LAI, NSH, NMIN, CLV, CRES, CST, PARINT, TILG1, TILG2 + real, intent(in) :: TILV, TRANRF + real, intent(out) :: RESMOB + real, intent(out) :: NSINK + real, intent(out) :: NSHmob + real, intent(out) :: nupt_max + real, intent(out) :: ALLOTOT + real, intent(out) :: GRESSI + + real :: CSTAV,SOURCE real :: NSHEXCESS + PHOT = PARINT * TRANRF * 12. * LUEMXQ * NOHARV RESMOB = (CRES * NOHARV / TCRES) * max(0.,min( 1.,DAVTMP/5. )) SOURCE = RESMOB + PHOT @@ -166,45 +178,54 @@ Subroutine Growth(LAI,NSH,NMIN,CLV,CRES,CST,PARINT,TILG1,TILG2,TILV,TRANRF, & NSHK = (CLV+CST)*NCSHMAX * (1.-exp(-K*LAI))/(K*LAI) NSHEXCESS = max( 0., NSH-NSHK ) NSHmob = NOHARV * NSHEXCESS / TCNSHMOB - NSOURCE = NMIN/TCNUPT + NSHmob NSINK = max(0., (GLVSI+GSTSI)*NCSHMAX ) -! NSINK = (GLVSI+GSTSI)*NCSHMAX - fNgrowth = min( 1., NSOURCE / NSINK ) + NUPT_MAX = min(NSINK-NSHmob, NMIN / TCNUPT) + +end Subroutine Growth + +Subroutine Allocation(use_nitrogen, ALLOTOT, GRESSI, NSOURCE, NSINK, GRES,GRT,GLV,GST) + logical, intent(in) :: use_nitrogen + real, intent(in) :: ALLOTOT + real, intent(in) :: GRESSI + real, intent(in) :: NSOURCE ! N available for growth from reserves and soil + real, intent(in) :: NSINK ! N required for maximum (N-unlimited) growth + real, intent(out) :: GRES, GRT, GLV, GST + + real :: GSHSI, ALLOSH, ALLORT, ALLOLV, ALLOST + + if (use_nitrogen) then + fNgrowth = min( 1., NSOURCE / NSINK ) + else + fNgrowth = 1.0 + end if GLAISI = GLAISI * fNgrowth GLVSI = GLVSI * fNgrowth GSTSI = GSTSI * fNgrowth - call Allocation(ALLOTOT,GRESSI,GRES,GRT,GLV,GST) -end Subroutine Growth - - Subroutine Allocation(ALLOTOT,GRESSI, GRES,GRT,GLV,GST) - real :: ALLOTOT, GRESSI - real :: GRES, GRT, GLV, GST - real :: GSHSI, ALLOSH, ALLORT, ALLOLV, ALLOST - GSHSI = GLVSI + GSTSI - if (DAYLGE >= 0.1) then + GSHSI = GLVSI + GSTSI + if (DAYLGE >= 0.1) then ! Situation 1: Growth has priority over storage (spring and growth period) - ! Calculate amount of assimilates allocated to shoot - ALLOSH = min( ALLOTOT, GSHSI ) - ! Calculate amount of assimilates allocated to reserves - GRES = min( ALLOTOT - ALLOSH, GRESSI) - else - ! Calculate amount of assimilates allocated to reserves - GRES = min( ALLOTOT, GRESSI ) - ! Calculate amount of assimilates allocated to shoot - ALLOSH = min( ALLOTOT - GRES, GSHSI ) - end if - ! All surplus carbohydrate goes to roots - ALLORT = ALLOTOT - ALLOSH - GRES - if (GSHSI == 0.) GSHSI = 1 - ALLOLV = GLVSI * (ALLOSH / GSHSI) - ALLOST = GSTSI * (ALLOSH / GSHSI) - GLV = ALLOLV * YG - GST = ALLOST * YG - GRT = ALLORT * YG - RESPGSH = (ALLOLV + ALLOST) * (1-YG) - RESPGRT = ALLORT * (1-YG) - end Subroutine Allocation + ! 
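Growth now only computes the nitrogen supply and demand terms; the limitation itself is applied in Allocation by scaling the structural sink terms. The scaling rule, transcribed to R:

n_limitation <- function(use_nitrogen, NSOURCE, NSINK) {
  if (use_nitrogen) min(1, NSOURCE / NSINK) else 1
}
n_limitation(TRUE, NSOURCE = 0.6, NSINK = 1.5)  # 0.4: growth sinks scaled down by 60%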
Calculate amount of assimilates allocated to shoot + ALLOSH = min( ALLOTOT, GSHSI ) + ! Calculate amount of assimilates allocated to reserves + GRES = min( ALLOTOT - ALLOSH, GRESSI) + else + ! Calculate amount of assimilates allocated to reserves + GRES = min( ALLOTOT, GRESSI ) + ! Calculate amount of assimilates allocated to shoot + ALLOSH = min( ALLOTOT - GRES, GSHSI ) + end if + ! All surplus carbohydrate goes to roots + ALLORT = ALLOTOT - ALLOSH - GRES + if (GSHSI == 0.) GSHSI = 1 + ALLOLV = GLVSI * (ALLOSH / GSHSI) + ALLOST = GSTSI * (ALLOSH / GSHSI) + GLV = ALLOLV * YG + GST = ALLOST * YG + GRT = ALLORT * YG + RESPGSH = (ALLOLV + ALLOST) * (1-YG) + RESPGRT = ALLORT * (1-YG) +end Subroutine Allocation Subroutine PlantRespiration(FO2,RESPHARD) real :: FO2,RESPHARD @@ -238,40 +259,40 @@ Subroutine Senescence(CLV,CRT,CSTUB,LAI,LT50,PERMgas,TANAER,TILV,Tsurf, & end Subroutine Senescence - Subroutine AnaerobicDamage(LT50,PERMgas,TANAER, dTANAER) - real :: LT50,PERMgas,TANAER - real :: dTANAER,LD50 - if (PERMgas==0.) then - dTANAER = 1. - else - dTANAER = -TANAER / DELT - end if - LD50 = LDT50A + LDT50B * LT50 - if (TANAER > 0.) then - RDRTOX = KRDRANAER / (1.+exp(-KRDRANAER*(TANAER-LD50))) - else - RDRTOX = 0. - end if - end Subroutine AnaerobicDamage +Subroutine AnaerobicDamage(LT50,PERMgas,TANAER, dTANAER) + real :: LT50,PERMgas,TANAER + real :: dTANAER,LD50 + if (PERMgas==0.) then + dTANAER = 1. + else + dTANAER = -TANAER / DELT + end if + LD50 = LDT50A + LDT50B * LT50 + if (TANAER > 0.) then + RDRTOX = KRDRANAER / (1.+exp(-KRDRANAER*(TANAER-LD50))) + else + RDRTOX = 0. + end if +end Subroutine AnaerobicDamage - Subroutine Hardening(CLV,LT50,Tsurf, DeHardRate,HardRate) - real :: CLV,LT50,Tsurf - real :: DeHardRate,HardRate - real :: RATED,RSR3H,RSRDAY - RSR3H = 1. / (1.+exp(-KRSR3H*(Tsurf-LT50))) - ! RDRFROST should be less than 1 to avoid numerical problems - ! (loss of all biomass but keeping positive reserves). We cap it at 0.5. - RSRDAY = RSR3H ! In previous versions we had RSRDAY = RSR3H^8 which understimated survival - RDRFROST = min( 0.5, 1. - RSRDAY ) - RATED = min( Dparam*(LT50MX-LT50)*(Tsurf+TsurfDiff), (LT50MX-LT50)/DELT ) - DeHardRate = max(0.,min( RATEDMX, RATED )) -! HardRate = RESPHARD / (CLV * KRESPHARD) - if (CLV > 0.) then - HardRate = RESPHARD / (CLV * KRESPHARD) - else - HardRate = 0. - end if - end Subroutine Hardening +Subroutine Hardening(CLV,LT50,Tsurf, DeHardRate,HardRate) + real :: CLV,LT50,Tsurf + real :: DeHardRate,HardRate + real :: RATED,RSR3H,RSRDAY + RSR3H = 1. / (1.+exp(-KRSR3H*(Tsurf-LT50))) + ! RDRFROST should be less than 1 to avoid numerical problems + ! (loss of all biomass but keeping positive reserves). We cap it at 0.5. + RSRDAY = RSR3H ! In previous versions we had RSRDAY = RSR3H^8 which understimated survival + RDRFROST = min( 0.5, 1. - RSRDAY ) + RATED = min( Dparam*(LT50MX-LT50)*(Tsurf+TsurfDiff), (LT50MX-LT50)/DELT ) + DeHardRate = max(0.,min( RATEDMX, RATED )) + ! HardRate = RESPHARD / (CLV * KRESPHARD) + if (CLV > 0.) then + HardRate = RESPHARD / (CLV * KRESPHARD) + else + HardRate = 0. 
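The allocation order flips with day length: during the growth period assimilates go to the shoot first, otherwise reserves are filled first, and the remainder always goes to roots. The same branch as a small R function, with the sink terms assumed given:

allocate_assimilates <- function(ALLOTOT, GRESSI, GSHSI, DAYLGE) {
  if (DAYLGE >= 0.1) {            # growth has priority over storage
    ALLOSH <- min(ALLOTOT, GSHSI)
    GRES   <- min(ALLOTOT - ALLOSH, GRESSI)
  } else {                        # storage has priority over growth
    GRES   <- min(ALLOTOT, GRESSI)
    ALLOSH <- min(ALLOTOT - GRES, GSHSI)
  }
  list(ALLOSH = ALLOSH, GRES = GRES,
       ALLORT = ALLOTOT - ALLOSH - GRES)  # all surplus carbohydrate to roots
}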
+ end if +end Subroutine Hardening Subroutine Foliage2(DAYL,GLV,LAI,TILV,TILG1,TRANRF,Tsurf,VERN, GLAI,GTILV,TILVG1,TILG1G2) real :: DAYL,GLV,LAI,TILV,TILG1,TRANRF,Tsurf diff --git a/models/basgra/src/set_params.f90 b/models/basgra/src/set_params.f90 index c486f018054..564dc83934f 100644 --- a/models/basgra/src/set_params.f90 +++ b/models/basgra/src/set_params.f90 @@ -1,179 +1,207 @@ -Subroutine set_params(pa) - -use parameters_site -use parameters_plant -implicit none -real :: pa(140) ! The length of pa() should be at least as high as the number of parameters - -! Initial constants -LOG10CLVI = pa(1) -LOG10CRESI = pa(2) -LOG10CRTI = pa(3) -CSTI = pa(4) -LOG10LAII = pa(5) -PHENI = pa(6) -TILTOTI = pa(7) -FRTILGI = pa(8) -LT50I = pa(9) - -! Process parameters -! CLAIV = pa(10) -COCRESMX = pa(11) -CSTAVM = pa(12) -DAYLB = pa(13) -DAYLP = pa(14) -DLMXGE = pa(15) -FSLAMIN = pa(16) -FSMAX = pa(17) -HAGERE = pa(18) -K = pa(19) -LAICR = pa(20) -LAIEFT = pa(21) -LAITIL = pa(22) -LFWIDG = pa(23) -LFWIDV = pa(24) -NELLVM = pa(25) -PHENCR = pa(26) -PHY = pa(27) -RDRSCO = pa(28) -RDRSMX = pa(29) -RDRTEM = pa(30) -RGENMX = pa(31) -ROOTDM = pa(32) -RRDMAX = pa(33) -RUBISC = pa(34) -SHAPE = pa(35) -SIMAX1T = pa(36) -SLAMAX = pa(37) -TBASE = pa(38) -TCRES = pa(39) -TOPTGE = pa(40) -TRANCO = pa(41) -YG = pa(42) - -LAT = pa(43) -WCI = pa(44) -FWCAD = pa(45) -FWCWP = pa(46) -FWCFC = pa(47) -FWCWET = pa(48) -WCST = pa(49) -WpoolMax = pa(50) - -Dparam = pa(51) -FGAS = pa(52) -FO2MX = pa(53) -gamma = pa(54) -Hparam = pa(55) -KRDRANAER = pa(56) -KRESPHARD = pa(57) -KRSR3H = pa(58) -KRTOTAER = pa(59) -KSNOW = pa(60) -LAMBDAsoil = pa(61) -LDT50A = pa(62) -LDT50B = pa(63) -LT50MN = pa(64) -LT50MX = pa(65) -RATEDMX = pa(66) -reHardRedDay = pa(67) -RHOnewSnow = pa(68) -RHOpack = pa(69) -SWret = pa(70) -SWrf = pa(71) -THARDMX = pa(72) -TmeltFreeze = pa(73) -TrainSnow = pa(74) -TsurfDiff = pa(75) -KLUETILG = pa(76) -FRTILGG1I = pa(77) -DAYLG1G2 = pa(78) -RGRTG1G2 = pa(79) -RDRTMIN = pa(80) -TVERN = pa(81) - -CLITT0 = pa( 82) ! (g C m-2) Initial C in litter -CSOM0 = pa( 83) ! (g C m-2) Initial C in OM -CNLITT0 = pa( 84) ! (g C g-1 N) Initial C/N ratio of litter -CNSOMF0 = pa( 85) ! (g C g-1 N) Initial C/N ratio of fast-decomposing OM -CNSOMS0 = pa( 86) ! (g C g-1 N) Initial C/N ratio of slowly decomposing OM -FCSOMF0 = pa( 87) ! (-) Initial C in fast-decomposing OM as a fraction of total OM -FLITTSOMF = pa( 88) ! (-) Fraction of decomposing litter that becomes OM -FSOMFSOMS = pa( 89) ! (-) Fraction of decomposing 'fast' OM that becomes slowly decomposing OM -RNLEACH = pa( 90) ! (-) Mineral N concentration of drainage water as a ratio of that in soil water -KNEMIT = pa( 91) ! (d-1) Max. relative emission rate of soil mineral N -NMIN0 = pa( 92) ! (g N m-2) Initial mineral N -TCLITT = pa( 93) ! (d) Residence time of litter -TCSOMF = pa( 94) ! (d) Residence time of fast-decomposing OM -TCSOMS = pa( 95) ! (d) Residence time of slowly decomposing OM -TMAXF = pa( 96) ! (degC) Temperature at which soil decomposition (fTsoil) is max. -TSIGMAF = pa( 97) ! (degC) Tolerance of soil decomposition for suboptimal temperature -RFN2O = pa( 98) ! (-) Sensitivity of the N2O/NO emission ratio to extreme values of water-filled pore space -WFPS50N2O = pa( 99) ! (-) Water-filled pore space at which the N2O and NO emission rates are equal - -! Parameters for N-processes -NCSHMAX = pa(100) ! (g N g-1 C) -NCR = pa(101) ! (g N g-1 C) - -! Senescence of roots and stubble -RDRROOT = pa(102) -RDRSTUB = pa(103) - -! 
Parameter for sensitivity analysis of fertilisation -NFERTMULT = pa(104) ! Multiplication factor for changing fertlisation (default = 1) - -! Additional parameters for N-processes -FNCGSHMIN = pa(105) -TCNSHMOB = pa(106) -TCNUPT = pa(107) - -F_DIGEST_WALL_FMIN = pa(108) -F_DIGEST_WALL_MAX = pa(109) -F_WALL_LV_FMIN = pa(110) -F_WALL_LV_MAX = pa(111) -F_WALL_ST_FMIN = pa(112) -F_WALL_ST_MAX = pa(113) - -CLVDI = pa(114) -YIELDI = pa(115) -CSTUBI = pa(116) - -DRYSTORI = pa(117) -FdepthI = pa(118) -SDEPTHI = pa(119) -TANAERI = pa(120) -WAPLI = pa(121) -WAPSI = pa(122) -WASI = pa(123) -WETSTORI = pa(124) - -NRTI = pa(125) -NSHI = pa(126) - -TILG1I = pa(127) -TILG2I = pa(128) -TILVI = pa(129) - -WALI = pa(130) -O2I = pa(131) - -CSOMF0 = pa(132) -CSOMS0 = pa(133) -NLITT0 = pa(134) -NSOMF0 = pa(135) -NSOMS0 = pa(136) - - -! Parameter transformations -CLVI = LOG10CLVI -CRESI = LOG10CRESI -CRTI = LOG10CRTI -LAII = LOG10LAII - -WCAD = FWCAD * WCST -WCWP = FWCWP * WCST -WCFC = FWCFC * WCST -WCWET = FWCWET * WCST - -return -end +module set_params_mod +contains + + Subroutine set_params(pa) + + use parameters_site + use parameters_plant + use yasso, only: nc_h_max + implicit none + real :: pa(:) ! The length of pa() should be at least as high as the number of parameters + + real :: cn_h_min + + if (size(pa) < 160) then + call rexit('parameter vector too small') + end if + + ! Initial constants + LOG10CLVI = pa(1) + LOG10CRESI = pa(2) + LOG10CRTI = pa(3) + CSTI = pa(4) + LOG10LAII = pa(5) + PHENI = pa(6) + TILTOTI = pa(7) + FRTILGI = pa(8) + LT50I = pa(9) + + ! Process parameters + ! CLAIV = pa(10) + COCRESMX = pa(11) + CSTAVM = pa(12) + DAYLB = pa(13) + DAYLP = pa(14) + DLMXGE = pa(15) + FSLAMIN = pa(16) + FSMAX = pa(17) + HAGERE = pa(18) + K = pa(19) + LAICR = pa(20) + LAIEFT = pa(21) + LAITIL = pa(22) + LFWIDG = pa(23) + LFWIDV = pa(24) + NELLVM = pa(25) + PHENCR = pa(26) + PHY = pa(27) + RDRSCO = pa(28) + RDRSMX = pa(29) + RDRTEM = pa(30) + RGENMX = pa(31) + ROOTDM = pa(32) + RRDMAX = pa(33) + RUBISC = pa(34) + SHAPE = pa(35) + SIMAX1T = pa(36) + SLAMAX = pa(37) + TBASE = pa(38) + TCRES = pa(39) + TOPTGE = pa(40) + TRANCO = pa(41) + YG = pa(42) + + LAT = pa(43) + WCI = pa(44) + FWCAD = pa(45) + FWCWP = pa(46) + FWCFC = pa(47) + FWCWET = pa(48) + WCST = pa(49) + WpoolMax = pa(50) + + Dparam = pa(51) + FGAS = pa(52) + FO2MX = pa(53) + gamma = pa(54) + Hparam = pa(55) + KRDRANAER = pa(56) + KRESPHARD = pa(57) + KRSR3H = pa(58) + KRTOTAER = pa(59) + KSNOW = pa(60) + LAMBDAsoil = pa(61) + LDT50A = pa(62) + LDT50B = pa(63) + LT50MN = pa(64) + LT50MX = pa(65) + RATEDMX = pa(66) + reHardRedDay = pa(67) + RHOnewSnow = pa(68) + RHOpack = pa(69) + SWret = pa(70) + SWrf = pa(71) + THARDMX = pa(72) + TmeltFreeze = pa(73) + TrainSnow = pa(74) + TsurfDiff = pa(75) + KLUETILG = pa(76) + FRTILGG1I = pa(77) + DAYLG1G2 = pa(78) + RGRTG1G2 = pa(79) + RDRTMIN = pa(80) + TVERN = pa(81) + + CLITT0 = pa( 82) ! (g C m-2) Initial C in litter + CSOM0 = pa( 83) ! (g C m-2) Initial C in OM + CNLITT0 = pa( 84) ! (g C g-1 N) Initial C/N ratio of litter + CNSOMF0 = pa( 85) ! (g C g-1 N) Initial C/N ratio of fast-decomposing OM + CNSOMS0 = pa( 86) ! (g C g-1 N) Initial C/N ratio of slowly decomposing OM + FCSOMF0 = pa( 87) ! (-) Initial C in fast-decomposing OM as a fraction of total OM + FLITTSOMF = pa( 88) ! (-) Fraction of decomposing litter that becomes OM + FSOMFSOMS = pa( 89) ! (-) Fraction of decomposing 'fast' OM that becomes slowly decomposing OM + RNLEACH = pa( 90) ! 
(-) Mineral N concentration of drainage water as a ratio of that in soil water + KNEMIT = pa( 91) ! (d-1) Max. relative emission rate of soil mineral N + NMIN0 = pa( 92) ! (g N m-2) Initial mineral N + TCLITT = pa( 93) ! (d) Residence time of litter + TCSOMF = pa( 94) ! (d) Residence time of fast-decomposing OM + TCSOMS = pa( 95) ! (d) Residence time of slowly decomposing OM + TMAXF = pa( 96) ! (degC) Temperature at which soil decomposition (fTsoil) is max. + TSIGMAF = pa( 97) ! (degC) Tolerance of soil decomposition for suboptimal temperature + RFN2O = pa( 98) ! (-) Sensitivity of the N2O/NO emission ratio to extreme values of water-filled pore space + WFPS50N2O = pa( 99) ! (-) Water-filled pore space at which the N2O and NO emission rates are equal + + ! Parameters for N-processes + NCSHMAX = pa(100) ! (g N g-1 C) + NCR = pa(101) ! (g N g-1 C) + + ! Senescence of roots and stubble + RDRROOT = pa(102) + RDRSTUB = pa(103) + + ! Parameter for sensitivity analysis of fertilisation + NFERTMULT = pa(104) ! Multiplication factor for changing fertlisation (default = 1) + + ! Additional parameters for N-processes + FNCGSHMIN = pa(105) + TCNSHMOB = pa(106) + TCNUPT = pa(107) + + F_DIGEST_WALL_FMIN = pa(108) + F_DIGEST_WALL_MAX = pa(109) + F_WALL_LV_FMIN = pa(110) + F_WALL_LV_MAX = pa(111) + F_WALL_ST_FMIN = pa(112) + F_WALL_ST_MAX = pa(113) + + CLVDI = pa(114) + YIELDI = pa(115) + CSTUBI = pa(116) + + DRYSTORI = pa(117) + FdepthI = pa(118) + SDEPTHI = pa(119) + TANAERI = pa(120) + WAPLI = pa(121) + WAPSI = pa(122) + WASI = pa(123) + WETSTORI = pa(124) + + NRTI = pa(125) + NSHI = pa(126) + + TILG1I = pa(127) + TILG2I = pa(128) + TILVI = pa(129) + + WALI = pa(130) + O2I = pa(131) + + CSOMF0 = pa(132) + CSOMS0 = pa(133) + NLITT0 = pa(134) + NSOMF0 = pa(135) + NSOMS0 = pa(136) + + ! extras for YASSO-BASGRA + use_yasso = pa(137) > 0 + use_nitrogen = pa(138) > 0 + cn_h_min = pa(139) + totc_init = pa(140) + fract_legacy_c = pa(141) + hist_mean_tempr = pa(142) + hist_yearly_precip = pa(143) + hist_tempr_ampl = pa(144) + + yasso_alpha_awen = pa(145:148) + yasso_beta12 = pa(149:150) + yasso_decomp_pc = pa(151:152) + yasso_cstate_init = pa(153:157) + yasso_nstate_init = pa(158) + yasso_met_init = pa(159:160) + + ! Parameter transformations + CLVI = LOG10CLVI + CRESI = LOG10CRESI + CRTI = LOG10CRTI + LAII = LOG10LAII + + WCAD = FWCAD * WCST + WCWP = FWCWP * WCST + WCFC = FWCFC * WCST + WCWET = FWCWET * WCST + + nc_h_max = 1 / cn_h_min + + end Subroutine set_params +end module set_params_mod diff --git a/models/basgra/src/soil.f90 b/models/basgra/src/soil.f90 index fb8323da9af..8d0d16f67aa 100644 --- a/models/basgra/src/soil.f90 +++ b/models/basgra/src/soil.f90 @@ -107,17 +107,31 @@ Subroutine O2fluxes(O2,PERMgas,ROOTD,RplantAer, O2IN,O2OUT) O2IN = PERMgas * ( (O2MX-O2) + O2OUT*DELT ) end Subroutine O2fluxes -Subroutine N_fert(year,doy,DAYS_FERT,NFERTV, Nfert) - integer :: year,doy,i - integer,dimension(300,2) :: DAYS_FERT - real ,dimension(300 ) :: NFERTV - real :: Nfert - Nfert = 0 - do i=1,300 - if ( (year==DAYS_FERT (i,1)) .and. 
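set_params now validates its input, and the 160 parameter rows of the new BASGRA_params.csv are exactly the minimum it accepts. A quick R-side check mirroring the Fortran guard (the YASSO switches ride along in the same vector):

df <- read.csv(system.file("BASGRA_params.csv", package = "PEcAn.BASGRA"), row.names = 1)
pa <- df$default_params
stopifnot(length(pa) >= 160)  # mirrors: if (size(pa) < 160) call rexit(...)
use_yasso <- pa[match("use_yasso", rownames(df))] > 0  # pa(137) in the Fortran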
(doy==DAYS_FERT (i,2)) ) then - Nfert = NFERTV (i) - end if +Subroutine N_fert(year,doy,DAYS_FERT,NFERTV, Nfert, input_soluble_c, input_compost_c, input_org_n) + integer, intent(in) :: year,doy + integer, intent(in), dimension(:,:) :: DAYS_FERT + real, intent(in), dimension(:,:) :: NFERTV + real, intent(out) :: Nfert + real, intent(out) :: input_soluble_c + real, intent(out) :: input_compost_c + real, intent(out) :: input_org_n + + integer :: i + + Nfert = 0.0 + input_soluble_c = 0.0 + input_compost_c = 0.0 + input_org_n = 0.0 + + do i=1,size(days_fert, 1) + if ( (year==DAYS_FERT (i,1)) .and. (doy==DAYS_FERT (i,2)) ) then + Nfert = NFERTV (i, 1) + input_org_n = NFERTV(i, 2) + input_soluble_c = NFERTV(i, 3) + input_compost_c = NFERTV(i, 4) + end if end do + end Subroutine N_fert Subroutine N_dep(year,doy,DAYS_NDEP,NDEPV, Ndep) @@ -183,5 +197,87 @@ Subroutine CNsoil(ROOTD,RWA,WFPS,WAL,GCR,CLITT,CSOMF,NLITT,NSOMF,NSOMS,NMIN,CSOM NemissionN2O = Nemission * fN2O NemissionNO = Nemission * (1.-fN2O) end Subroutine CNsoil + +subroutine CNSoil_stub(ROOTD, RWA, WFPS, WAL, GCR, cstate_yasso, nstate_yasso, NMIN, runoff_cstate) + use yasso, only : partition_nitr, statesize_yasso + real, intent(in) :: ROOTD + real, intent(in) :: RWA + real, intent(in) :: WFPS + real, intent(in) :: WAL + real, intent(in) :: GCR + real, intent(in) :: cstate_yasso(statesize_yasso) + real, intent(in) :: nstate_yasso + real, intent(in) :: NMIN + real, intent(out) :: runoff_cstate(statesize_yasso) + + real :: c_awen + real :: nitr_awen, nitr_h + real :: fN2O + + c_awen = sum(cstate_yasso(1:4)) + call partition_nitr(cstate_yasso, nstate_yasso, nitr_awen, nitr_h) + + runoff_cstate(1:4) = ((cstate_yasso(1:4)*RUNOFF) / ROOTD) * RRUNBULK * 0.001 + runoff_cstate(5) = 0.0 + rCSOMF = sum(runoff_cstate) + !rCSOMF = ((c_awen*RUNOFF) / ROOTD) * RRUNBULK * 0.001 + rNSOMF = ((nitr_awen*RUNOFF) / ROOTD) * RRUNBULK * 0.001 + rCLITT = 0.0 + rNLITT = 0.0 + ! N fixation, leaching, emission + Nfixation = gCR * KNFIX + ! Nleaching = (NMIN*RNLEACH*DRAIN) / WAL + if ((WAL > 0.) .and. (NMIN > 0.)) then + Nleaching = (NMIN*RNLEACH*DRAIN) / WAL + else + Nleaching = 0. + end if + Nemission = NMIN * KNEMIT * RWA + fN2O = 1. / (1. + exp(-RFN2O*(WFPS-WFPS50N2O))) + NemissionN2O = Nemission * fN2O + NemissionNO = Nemission * (1.-fN2O) +end subroutine CNSoil_stub + +subroutine adjust_nmin_fluxes(use_yasso, NMIN, nupt_max, yasso_ntend, nupt_max_adj, nmin_immob_yasso) + logical, intent(in) :: use_yasso + real, intent(in) :: NMIN ! current mineral N pool + real, intent(in) :: nupt_max ! plant N need + real, intent(in) :: yasso_ntend ! rate of change of the YASSO N pool + real, intent(out) :: nupt_max_adj ! plant uptake capacity after taking immobilisation into account + real, intent(out), optional :: nmin_immob_yasso ! nitrogen mineralisation (> 0) or immobilisation (< 0) when use_yasso = .true. + + real :: nmin_immob_pot + real :: reduction + + if (.not. use_yasso) then + nupt_max_adj = nupt_max + return + end if + + if (.not. present(nmin_immob_yasso)) then + error stop 'nmin_immob_yasso must be present when use_yasso == .true.' + end if + + nmin_immob_pot = yasso_ntend + if (nmin_immob_pot < 0.0) then + ! net N mineralisation, no constraint on plant uptake + nmin_immob_yasso = -nmin_immob_pot + nupt_max_adj = nupt_max + else + ! net immobilisation + if (nupt_max + nmin_immob_pot < NMIN) then + ! plant and microbial N demand fully satisfied + nupt_max_adj = nupt_max + nmin_immob_yasso = -nmin_immob_pot + else + ! 
scale down proportionally + reduction = NMIN / (nupt_max + nmin_immob_pot) + nupt_max_adj = nupt_max * reduction + nmin_immob_yasso = -nmin_immob_pot * reduction + end if + end if + +end subroutine adjust_nmin_fluxes + end module soil diff --git a/models/basgra/src/yasso.f90 b/models/basgra/src/yasso.f90 new file mode 100644 index 00000000000..93fe0095a0a --- /dev/null +++ b/models/basgra/src/yasso.f90 @@ -0,0 +1,587 @@ +module yasso +implicit none +private + +real, parameter :: days_yr = 365.0 +integer, parameter, public :: statesize_yasso = 5 + +! The yasso parameter vector: +! 1-16 matrix A entries: 4*alpha, 12*p +! 17-21 Leaching parameters: w1,...,w5 IGNORED IN THIS FUNCTION +! 22-23 Temperature-dependence parameters for AWE fractions: beta_1, beta_2 +! 24-25 Temperature-dependence parameters for N fraction: beta_N1, beta_N2 +! 26-27 Temperature-dependence parameters for H fraction: beta_H1, beta_H2 +! 28-30 Precipitation-dependence parameters for AWE, N and H fraction: gamma, gamma_N, gamma_H +! 31-32 Humus decomposition parameters: p_H, alpha_H (Note the order!) +! 33-35 Woody parameters: theta_1, theta_2, r + +! The Yasso20 maximum a posteriori parameters: +integer, parameter, public :: num_params_y20 = 35 +real, parameter, public :: param_y20_map(num_params_y20) = (/ & + 0.51, & + 5.19, & + 0.13, & + 0.1, & + 0.5, & + 0., & + 1., & + 1., & + 0.99, & + 0., & + 0., & + 0., & + 0., & + 0., & + 0.163, & + 0., & + -0., & + 0., & + 0., & + 0., & + 0., & + 0.158, & + -0.002, & + 0.17, & + -0.005, & + 0.067, & + -0., & + -1.44, & + -2.0, & + -6.9, & + 0.0042, & + 0.0015, & + -2.55, & + 1.24, & + 0.25/) + +! Nitrogen-specific parameters +! +real, parameter, public :: nc_mb = 0.1 ! N-C ratio of the microbial biomass +real, parameter, public :: cue_min = 0.1 ! minimum microbial carbon use efficiency +real, public :: nc_h_max = 0.1 ! N-C ratio of the H pool + +! AWENH composition from Palosuo et al. (2015), for grasses. For now, we'll use the same +! composition for both above and below ground inputs. The last values (H) are always 0. +real, parameter :: awenh_fineroot(statesize_yasso) = (/0.46, 0.32, 0.04, 0.18, 0.0/) +real, parameter :: awenh_leaf(statesize_yasso) = (/0.46, 0.32, 0.04, 0.18, 0.0/) +! A soil amendment consisting of soluble carbon (and nitrogen) +real, parameter :: awenh_soluble(statesize_yasso) = (/0.0, 1.0, 0.0, 0.0, 0.0/) +! From Heikkinen et al 2021, composted horse manure with straw litter +real, parameter :: awenh_compost(statesize_yasso) = (/0.69, 0.09, 0.02, 0.20, 0.0/) + +integer, parameter, public :: met_ind_init = 1 + +! whether use the exponentially weighted averaging for meteorological parameters +logical, parameter, public :: use_met_ema = .true. + +public get_params +public decompose +public initialize +public initialize_totc +public average_met +public average_met_ema +public partition_nitr +public inputs_to_fractions + +contains + + subroutine get_params(param_base, alpha_awen, beta12, decomp_pc, param_final) + real, intent(in) :: param_base(:) ! base params, e.g. the yasso20 MAP vector + real, intent(in) :: alpha_awen(4) ! base decomposition rate for the AWEN pools + real, intent(in) :: beta12(2) ! temperature sensitivity parameters + real, intent(in) :: decomp_pc(2) ! perturbations along principal components for (1) AWE rates and (2) beta1 and beta2 + real, intent(out) :: param_final(:) ! 
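adjust_nmin_fluxes settles the competition for mineral N: when plant demand plus microbial immobilisation would overdraw the pool, both are scaled by the same factor. The rule in R (yasso_ntend > 0 means net immobilisation, matching the Fortran sign convention; nmin_immob > 0 means net mineralisation added to NMIN):

adjust_nmin <- function(NMIN, nupt_max, yasso_ntend) {
  if (yasso_ntend < 0 || nupt_max + yasso_ntend < NMIN) {
    list(nupt_max_adj = nupt_max, nmin_immob = -yasso_ntend)  # demands fully satisfied
  } else {
    r <- NMIN / (nupt_max + yasso_ntend)                      # scale down proportionally
    list(nupt_max_adj = nupt_max * r, nmin_immob = -yasso_ntend * r)
  }
}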
the modified parameters + + real, parameter :: pc_rate(3) = (/-0.101565, -1.017149, -0.024609/) + real, parameter :: pc_tresp(2) = (/0.023387, -0.000753/) + + param_final = param_base + param_final(1:3) = alpha_awen(1:3) + pc_rate * decomp_pc(1) + param_final(4) = alpha_awen(4) + param_final(22:23) = beta12 + pc_tresp * decomp_pc(2) + ! hard constraints on parameter values + param_final(1:4) = max(param_final(1:4), 1e-6) ! decomposition rates must be strictly positive + param_final(22) = max(param_final(22), 0.0) ! first order temperature response must be positive + param_final(23) = min(param_final(23), 0.0) ! second order temperature response must be negative + end subroutine get_params + + subroutine initialize(param, flux_leafc_day, flux_rootc_day, flux_nitr_day, & + tempr_c, precip_day, tempr_ampl, totc_min, cstate, nstate) + ! A simple algorithm to initialize the SOC pools into a steady state or a partial + ! steady-state. First, the equilibrium SOC is evaluated. Then, if totc_min is > 0 and + ! greater than the equilibrium, the deficit will be covered by increasing the H + ! pool. The nitrogen pool is left unconstrained and is equal to the equilibrium N + + ! the possible contribution from the extra H. + real, intent(in) :: param(:) ! parameter vector + real, intent(in) :: flux_leafc_day ! carbon input with "leaf" composition per day + real, intent(in) :: flux_rootc_day ! carbon input with "fineroot" composition per day + real, intent(in) :: flux_nitr_day ! organic nitrogen input per day + real, intent(in) :: tempr_c + real, intent(in) :: tempr_ampl + real, intent(in) :: precip_day ! mm + real, intent(in) :: totc_min ! see above + real, intent(out) :: cstate(statesize_yasso) + real, intent(out) :: nstate ! nitrogen + + real :: neg_c_input_yr(statesize_yasso) + real :: matrix(statesize_yasso, statesize_yasso) + real :: totc + + + ! Carbon + ! + neg_c_input_yr = -(flux_leafc_day * awenh_leaf + flux_rootc_day * awenh_fineroot) * 365.0 + call evaluate_matrix_mean_tempr(param, tempr_c, precip_day * days_yr,tempr_ampl, matrix) + ! Solve the equilibrium condition Ax + b = 0 + call solve(matrix, neg_c_input_yr, cstate) + totc = sum(cstate) + !dblepr1('totc before adjust', -1, totc) + !dblepr1('totc_min before adjust', -1, totc_min) + if (totc_min > 0.0 .and. totc < totc_min) then + cstate(5) = cstate(5) + totc_min - totc + end if + !dblepr1('totc after adjust', -1, sum(cstate)) + + ! Nitrogen + ! + call eval_steadystate_nitr(& + cstate, & + -sum(neg_c_input_yr), & ! respiration equal to C input in equilibrium + flux_nitr_day * days_yr, & + matrix, & + nstate) + + end subroutine initialize + + subroutine eval_steadystate_nitr(cstate, resp_yr, nitr_input_yr, matrix, nstate) + ! evaluate the steady state N pool based on the steady state C pools. + real, intent(in) :: cstate(statesize_yasso) + real, intent(in) :: resp_yr ! respiration in steady state == negative input + real, intent(in) :: nitr_input_yr ! nitrogen input + real, intent(in) :: matrix(statesize_yasso, statesize_yasso) ! the matrix used in steady state computation + real, intent(out) :: nstate ! steady state N + + integer, parameter :: max_cue_iter = 10 + + real :: decomp_h + real :: cue + real :: cupt_awen + real :: nc_awen + real :: growth_c + integer :: cue_iter + real :: nc_som + + decomp_h = matrix(5,5) * cstate(5) + cue = 0.43 ! initially + + do cue_iter = 1, max_cue_iter + cupt_awen = (resp_yr - decomp_h) / (1.0 - cue) + growth_c = cue * cupt_awen + ! 
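The equilibrium in initialize comes from the steady-state condition dx/dt = Ax + b = 0, i.e. x = -A^-1 b. In R this is a one-liner (A is the decomposition matrix, b_input the annual C input by AWENH fraction):

equilibrium_pools <- function(A, b_input) solve(A, -b_input)  # same as the Fortran solve(matrix, -b)
# any shortfall relative to totc_min is then added to the H pool, as above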
Solve nc_awen from the state equation (below) such that nstate becomes stationary: + nc_awen = (1.0 / cupt_awen) * (nc_mb * cue * cupt_awen - nc_h_max*decomp_h + nitr_input_yr) + nstate = sum(cstate(1:4)) * nc_awen + nc_h_max * cstate(5) + nc_som = nstate / sum(cstate) + cue = max(min(0.43 * (nc_som / nc_mb) ** 0.6, 1.0), cue_min) + end do + + end subroutine eval_steadystate_nitr + + subroutine initialize_totc(param, totc, cn_input, fract_root_input, fract_legacy_soc, & + tempr_c, precip_day, tempr_ampl, cstate, nstate) + ! Another, simpler initialization method which enforces the total C and N stocks + ! strictly and requires setting the fraction of "legacy" carbon explicitly. Given a + ! total C, the C pools are set as a weighted combination of an equilibrated + ! partitioning and a "legacy" partitioning where all C is assigned to the H pool. The + ! weighting is given by the fract_legacy_soc parameter. The N pool is set analoguously + ! with the equilibrium N depending on the given C:N ratio of input. + real, intent(in) :: param(:) ! parameter vector + real, intent(in) :: totc ! total C pool + real, intent(in) :: cn_input ! C:N ratio of the steady-state input + real, intent(in) :: fract_root_input ! fraction of input C with the fineroot composition + real, intent(in) :: fract_legacy_soc + real, intent(in) :: tempr_c + real, intent(in) :: tempr_ampl + real, intent(in) :: precip_day ! mm + real, intent(out) :: cstate(statesize_yasso) + real, intent(out) :: nstate ! nitrogen + + real, parameter :: legacy_state(statesize_yasso) = (/0.0, 0.0, 0.0, 0.0, 1.0/) + real :: matrix(statesize_yasso, statesize_yasso) + real :: unit_input(statesize_yasso) + real :: tmpstate(statesize_yasso) + real :: eqstate(statesize_yasso) + real :: eqfac + real :: eqnitr + + call evaluate_matrix_mean_tempr(param, tempr_c, precip_day * days_yr,tempr_ampl, matrix) + if (fract_root_input < 0.0 .or. fract_root_input > 1) then + call dblepr1('Bad fract_root_input:', -1, fract_root_input) + call rexit('Bad fract_root_input') + end if + if (fract_legacy_soc < 0.0 .or. fract_legacy_soc > 1) then + call dblepr1('Bad fract_legacy_soc:', -1, fract_legacy_soc) + call rexit('Bad fract_legacy_soc') + end if + + unit_input = fract_root_input * awenh_fineroot + (1.0 - fract_root_input) * awenh_leaf + call solve(matrix, -unit_input, tmpstate) + eqfac = totc / sum(tmpstate) + eqstate = eqfac * tmpstate + call eval_steadystate_nitr(eqstate, eqfac, eqfac / cn_input, matrix, eqnitr) + + cstate = fract_legacy_soc * legacy_state * totc + (1.0 - fract_legacy_soc) * eqstate + nstate = fract_legacy_soc * totc * nc_h_max + (1.0 - fract_legacy_soc) * eqnitr + + call labelpr('TOTC INITIALIZATION', -1) + call dblepr('CSTATE:', -1, cstate, statesize_yasso) + call dblepr1('C:N ratio:', -1, sum(cstate)/nstate) + call dblepr1('Equlibrium C input:', -1, eqfac) + call dblepr1('legacy fraction:', -1, fract_legacy_soc) + call dblepr('equilibrium state:', -1, eqstate, statesize_yasso) + end subroutine initialize_totc + + subroutine inputs_to_fractions(leaf, root, soluble, compost, fract) + ! Split C in various types of inputs into the (here hard-coded) YASSO fractions + ! ("AWEN"). The fifth pool (H) never receives external input. Only C needs to be + ! split, nitrogen always goes to the total pool. 
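Concretely, each input stream is spread over the AWENH pools with a fixed composition vector (grass litter from Palosuo et al. 2015, compost from Heikkinen et al. 2021, as defined at the top of the module). In R:

awenh_leaf    <- c(0.46, 0.32, 0.04, 0.18, 0)  # same composition used for fine roots
awenh_soluble <- c(0, 1, 0, 0, 0)
awenh_compost <- c(0.69, 0.09, 0.02, 0.20, 0)
inputs_to_fractions <- function(leaf, root, soluble, compost) {
  leaf * awenh_leaf + root * awenh_leaf +
    soluble * awenh_soluble + compost * awenh_compost
}
inputs_to_fractions(leaf = 2, root = 1, soluble = 0, compost = 0)  # daily g C split into AWENH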
+ real, intent(in) :: leaf + real, intent(in) :: root + real, intent(in) :: soluble + real, intent(in) :: compost + real, intent(out) :: fract(statesize_yasso) + + fract = leaf * awenh_leaf & + + root * awenh_fineroot & + + soluble * awenh_soluble & + + compost * awenh_compost + + end subroutine inputs_to_fractions + + subroutine decompose(param, timestep_days, tempr_c, & + precip_day, cstate, nstate, ctend, ntend) + real, intent(in) :: param(:) ! parameter vector + real, intent(in) :: timestep_days + real, intent(in) :: tempr_c ! air temperature + real, intent(in) :: precip_day ! precipitation mm / day + real, intent(in) :: cstate(:) ! AWENH + real, intent(in) :: nstate ! nitrogen, single pool + real, intent(out) :: ctend(:) ! AWENH time derivative + real, intent(out) :: ntend ! nitrogen, single pool + + real :: matrix(statesize_yasso, statesize_yasso) + real :: totc ! total C, step beginning + real :: decomp_h ! C mineralization from the H pool + real :: cue ! carbon use (growth) efficiency + real :: nc_som ! current N:C ratio of the SOM + real :: growth_c ! microbial growth in C + real :: cupt_awen ! C uptake from AWEN + real :: cupt_h ! C uptake from H + real :: timestep_yr + real :: nc_awen ! N:C ratio of the AWEN pools + real :: nitr_awen ! nitrogen remaining after subtracting H nitrogen from the total + real :: nc_h ! N:C of the H pool + real :: resp ! heterotrophic respiration + + totc = sum(cstate) + + ! Carbon + ! + call evaluate_matrix(param, tempr_c, precip_day * days_yr, matrix) + ! The equation is in form of dx/dt = Ax + b. The standalone Y20 uses a matrix + ! exponential in yearly or longer steps, but here with a daily timestep this is not + ! needed and explicit 1st order time stepping is used instead. + timestep_yr = timestep_days / days_yr + ctend = matmul(matrix, cstate) * timestep_yr ! (matmul(matrix, cstate) + c_input_yr) * timestep_yr + resp = sum(-ctend) + + ! Nitrogen + ! + if (totc < 1e-6) then + ! No SOM, no need for N dynamics + ntend = 0.0 + else + decomp_h = matrix(5,5) * cstate(5) * timestep_yr + if (cstate(5) * nc_h_max > nstate) then + ! This should require very unusual inputs or parameters. Handle it nevertheless: + nc_h = nstate / totc + else + nc_h = nc_h_max + end if + nitr_awen = nstate - cstate(5) * nc_h + nc_awen = nitr_awen / (totc - cstate(5) + 1e-9) + nc_som = nstate / totc + cue = max(min(0.43 * (nc_som / nc_mb) ** 0.6, 1.0), cue_min) + ! resp_from_awen = uptake_from_awen * (1 - CUE), and thus: + cupt_awen = (resp - decomp_h) / (1.0 - cue) + ! Yasso has no C flow from H to AWEN so we assume no C uptake from H. + growth_c = cue * cupt_awen + ! The immobilization / mineralization is equal to the difference of nitrogen needed for + ! microbial growth and the nitrogen released from the decomposed organic matter. + ntend = nc_mb * growth_c - nc_awen * cupt_awen - nc_h * decomp_h + end if + + end subroutine decompose + + subroutine partition_nitr(cstate, nstate, nitr_awen, nitr_h) + real, intent(in) :: nstate + real, intent(in) :: cstate(statesize_yasso) + real, intent(out) :: nitr_awen + real, intent(out) :: nitr_h + + real :: totc + real :: nc_h + + if (cstate(5) * nc_h_max > nstate) then + ! This should require very unusual inputs or parameters. Handle it nevertheless: + nc_h = nstate / totc + else + nc_h = nc_h_max + end if + nitr_h = nc_h * cstate(5) + nitr_awen = nstate - nitr_h + end subroutine partition_nitr + + subroutine evaluate_matrix(param, tempr, precip, matrix) + real, intent(in) :: param(:) ! 
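One apparent slip in partition_nitr as written: totc is declared but never assigned before the division nc_h = nstate / totc in the degenerate branch (decompose, by contrast, sets totc = sum(cstate) first). An R transcription with that assignment restored:

partition_nitr <- function(cstate, nstate, nc_h_max = 0.1) {
  totc <- sum(cstate)  # the assignment missing from the Fortran above
  nc_h <- if (cstate[5] * nc_h_max > nstate) nstate / totc else nc_h_max
  nitr_h <- nc_h * cstate[5]
  list(nitr_awen = nstate - nitr_h, nitr_h = nitr_h)
}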
parameter vector + real, intent(in) :: tempr ! temperature deg C + real, intent(in) :: precip ! mm / yr + real, intent(out) :: matrix(:,:) ! decomposition matrix, "A" in YASSO publications + + real :: temprm ! temperature modifier for AWE + real :: temprmN ! temperature modifier for N + real :: temprmH ! temperature modifier for H + real :: decm ! rate modifier for AWE + real :: decmN ! rate modifier for N + real :: decmH ! rate modifier for H + + integer :: ii + + temprm = exp(param(22)*tempr + param(23)*tempr**2) + temprmN = exp(param(24)*tempr + param(25)*tempr**2) + temprmH = exp(param(26)*tempr + param(27)*tempr**2) + + ! The full rate modifiers including precipitation. The Y20 code has here division + ! by 12 due to monthly averaging of the temperature modifer, which is not done here. + decm = temprm * (1.0 - exp(param(28) * precip * 0.001)) + decmN = temprmN * (1.0 - exp(param(29) * precip * 0.001)) + decmH = temprmH * (1.0 - exp(param(30) * precip * 0.001)) + + ! Calculating matrix A (will work ok despite the sign of alphas) + DO ii = 1,3 + matrix(ii,ii) = -ABS(param(ii))*decm + END DO + matrix(4,4) = -ABS(param(4))*decmN + + matrix(1,2) = param(5)*ABS(matrix(2,2)) + matrix(1,3) = param(6)*ABS(matrix(3,3)) + matrix(1,4) = param(7)*ABS(matrix(4,4)) + matrix(1,5) = 0.0 ! no mass flows from H -> AWEN + matrix(2,1) = param(8)*ABS(matrix(1,1)) + matrix(2,3) = param(9)*ABS(matrix(3,3)) + matrix(2,4) = param(10)*ABS(matrix(4,4)) + matrix(2,5) = 0.0 + matrix(3,1) = param(11)*ABS(matrix(1,1)) + matrix(3,2) = param(12)*ABS(matrix(2,2)) + matrix(3,4) = param(13)*ABS(matrix(4,4)) + matrix(3,5) = 0.0 + matrix(4,1) = param(14)*ABS(matrix(1,1)) + matrix(4,2) = param(15)*ABS(matrix(2,2)) + matrix(4,3) = param(16)*ABS(matrix(3,3)) + matrix(4,5) = 0.0 + matrix(5,5) = -ABS(param(32))*decmH ! no size effect in humus + DO ii = 1,4 + matrix(5,ii) = param(31)*ABS(matrix(ii,ii)) ! mass flows AWEN -> H (size effect is present here) + END DO + + end subroutine evaluate_matrix + + subroutine evaluate_matrix_mean_tempr(param, tempr, precip, tempr_ampl, matrix) + ! Evaluate the matrix as above, but use the old YASSO-15 temperature averaging + real, intent(in) :: param(:) ! parameter vector + real, intent(in) :: tempr ! temperature deg C + real, intent(in) :: precip ! mm / yr + real, intent(in) :: tempr_ampl ! temperature yearly amplitude, deg C + real, intent(out) :: matrix(:,:) ! decomposition matrix, "A" in YASSO publications + + real :: temprm ! temperature modifier for AWE + real :: temprmN ! temperature modifier for N + real :: temprmH ! temperature modifier for H + real :: decm ! rate modifier for AWE + real :: decmN ! rate modifier for N + real :: decmH ! rate modifier for H + real :: te(4) ! temperature for averaging the temperature modifier + + integer :: ii + real, parameter :: pi = 3.141592653589793 + + ! temperature annual cycle approximation + te(1) = tempr+4*tempr_ampl*(1/sqrt(2.0)-1)/pi + te(2) = tempr-4*tempr_ampl/sqrt(2.0)/pi + te(3) = tempr+4*tempr_ampl*(1-1/sqrt(2.0))/pi + te(4) = tempr+4*tempr_ampl/sqrt(2.0)/pi + + ! average over the 4 temperature points + temprm = 0.25 * sum(exp(param(22)*te + param(23)*te**2)) + temprmN = 0.25 * sum(exp(param(24)*te + param(25)*te**2)) + temprmH = 0.25 * sum(exp(param(26)*te + param(27)*te**2)) + + ! The full rate modifiers including precipitation. The Y20 code has here division + ! by 12 due to monthly averaging of the temperature modifer, which is not done here. 
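The YASSO-15-style averaging approximates the annual temperature cycle with four quadrature points instead of a monthly climatology. The same average in R, evaluated with the Y20 MAP betas from positions 22-23 of the parameter vector:

tempr_modifier_mean <- function(tempr, tempr_ampl, beta1, beta2) {
  # four points on an assumed sinusoidal annual cycle
  te <- c(tempr + 4 * tempr_ampl * (1 / sqrt(2) - 1) / pi,
          tempr - 4 * tempr_ampl / sqrt(2) / pi,
          tempr + 4 * tempr_ampl * (1 - 1 / sqrt(2)) / pi,
          tempr + 4 * tempr_ampl / sqrt(2) / pi)
  mean(exp(beta1 * te + beta2 * te^2))
}
tempr_modifier_mean(5, 10, 0.158, -0.002)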
+ decm = temprm * (1.0 - exp(param(28) * precip * 0.001)) + decmN = temprmN * (1.0 - exp(param(29) * precip * 0.001)) + decmH = temprmH * (1.0 - exp(param(30) * precip * 0.001)) + + ! Calculating matrix A (will work ok despite the sign of alphas) + DO ii = 1,3 + matrix(ii,ii) = -ABS(param(ii))*decm + END DO + matrix(4,4) = -ABS(param(4))*decmN + + matrix(1,2) = param(5)*ABS(matrix(2,2)) + matrix(1,3) = param(6)*ABS(matrix(3,3)) + matrix(1,4) = param(7)*ABS(matrix(4,4)) + matrix(1,5) = 0.0 ! no mass flows from H -> AWEN + matrix(2,1) = param(8)*ABS(matrix(1,1)) + matrix(2,3) = param(9)*ABS(matrix(3,3)) + matrix(2,4) = param(10)*ABS(matrix(4,4)) + matrix(2,5) = 0.0 + matrix(3,1) = param(11)*ABS(matrix(1,1)) + matrix(3,2) = param(12)*ABS(matrix(2,2)) + matrix(3,4) = param(13)*ABS(matrix(4,4)) + matrix(3,5) = 0.0 + matrix(4,1) = param(14)*ABS(matrix(1,1)) + matrix(4,2) = param(15)*ABS(matrix(2,2)) + matrix(4,3) = param(16)*ABS(matrix(3,3)) + matrix(4,5) = 0.0 + matrix(5,5) = -ABS(param(32))*decmH ! no size effect in humus + DO ii = 1,4 + matrix(5,ii) = param(31)*ABS(matrix(ii,ii)) ! mass flows AWEN -> H (size effect is present here) + END DO + + end subroutine evaluate_matrix_mean_tempr + + subroutine average_met(met_daily, met_rolling, aver_size, met_state, met_ind) + ! Evaluate a rolling window average for given met quantities. Used for scaling met + ! parameters from daily to monthly level. + real, intent(in) :: met_daily(:) + real, intent(out) :: met_rolling(:) + integer, intent(in) :: aver_size ! number of days to average over, must not change + real, intent(inout) :: met_state(:,:) ! size(met_daily), aver_size + 1 + integer, intent(inout) :: met_ind ! a counter, must be 1 on first call, not changed outside + + if (met_ind < 1 .or. met_ind > aver_size+1) then + call intpr1('something wrong with met_ind:', -1, met_ind) + call rexit('something wrong with met_ind') + end if + if (size(met_state, 2) /= aver_size + 1 .or. size(met_state, 1) /= size(met_rolling)) then + call intpr('met_state has wrong size', -1, shape(met_state), 2) + call rexit('met_state has wrong size') + end if + + if (met_ind <= aver_size) then + ! For the first aver_size days average as many values as have been input. + met_state(:,met_ind) = met_daily + met_ind = met_ind + 1 + else + ! met_ind now stays as aver_size+1 + met_state(:,aver_size+1) = met_daily + met_state(:,1:aver_size) = met_state(:,2:aver_size+1) + end if + + met_rolling = sum(met_state(:,1:met_ind-1), dim=2) / (met_ind-1) + + end subroutine average_met + + subroutine average_met_ema(met_daily, met_rolling) + ! Evaluate an exponentially weighted moving average for the daily met quantities to + ! scale them to monthly level. + real, intent(in) :: met_daily(:) + real, intent(out) :: met_rolling(:) + + real :: alpha_smooth1=0.05, alpha_smooth2=0.005 + + if (size(met_rolling) /= 2) then + call rexit('met_rolling has wrong shape') + end if + if (size(met_daily) /= 2) then + call rexit('met_daily has wrong shape') + end if + + met_rolling(1) = alpha_smooth1 * met_daily(1) + (1-alpha_smooth1) * met_rolling(1) + met_rolling(2) = alpha_smooth2 * met_daily(2) + (1-alpha_smooth2) * met_rolling(2) + + end subroutine average_met_ema + + !************************************************************************************ + ! Linear algebra for the steady state computation + + SUBROUTINE solve(A, b, x) + ! 
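The exponential moving average is the stateless alternative to the 31-slot ring buffer in average_met: temperature is smoothed with alpha = 0.05 and precipitation, much more slowly, with alpha = 0.005. In R, seeded with the TEMPR30/PRECIP30 defaults from the parameter table:

average_met_ema <- function(met_daily, met_rolling, alpha = c(0.05, 0.005)) {
  alpha * met_daily + (1 - alpha) * met_rolling  # (temperature, precipitation)
}
met_rolling <- c(5.0, 1.5)  # TEMPR30, PRECIP30 initial values
met_rolling <- average_met_ema(c(7.2, 0.0), met_rolling)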
Solve linear system A*x = b + IMPLICIT NONE + INTEGER,PARAMETER :: n = 5 + REAL,DIMENSION(n,n),INTENT(IN) :: A + REAL,DIMENSION(n),INTENT(IN) :: b + REAL,DIMENSION(n),INTENT(OUT) :: x + REAL,DIMENSION(n,n) :: U + REAL,DIMENSION(n) :: c + INTEGER :: i + + ! transform the problem to upper diagonal form + CALL pgauss(A, b, U, c) + + ! solve U*x = c via back substitution + x(n) = c(n)/U(n,n) + DO i = n-1,1,-1 + x(i) = (c(i) - DOT_PRODUCT(U(i,i+1:n),x(i+1:n)))/U(i,i) + END DO + END SUBROUTINE solve + + SUBROUTINE pgauss(A, b, U, c) + ! Transform the lin. system to upper diagonal form using gaussian elimination + ! with pivoting + IMPLICIT NONE + INTEGER,PARAMETER :: n = 5 + REAL,DIMENSION(n,n),INTENT(IN) :: A + REAL,DIMENSION(n),INTENT(IN) :: b + REAL,DIMENSION(n,n),INTENT(OUT) :: U + REAL,DIMENSION(n),INTENT(OUT) :: c + INTEGER :: k, j + REAL,PARAMETER :: tol = 1E-12 + + U = A + c = b + DO k = 1,n-1 + CALL pivot(U,c,k) ! do pivoting (though may not be necessary in our case) + IF (ABS(U(k,k)) <= tol) THEN + call rwarn('Warning!!! Matrix is singular to working precision!') + END IF + U(k+1:n,k) = U(k+1:n,k)/U(k,k) + DO j = k+1,n + U(j,k+1:n) = U(j,k+1:n) - U(j,k)*U(k,k+1:n) + END DO + c(k+1:n) = c(k+1:n) - c(k)*U(k+1:n,k) + END DO + END SUBROUTINE pgauss + + SUBROUTINE pivot(A, b, k) + ! perform pivoting to matrix A and vector b at row k + IMPLICIT NONE + INTEGER,PARAMETER :: n = 5 + REAL,DIMENSION(n,n),INTENT(INOUT) :: A + REAL,DIMENSION(n),INTENT(INOUT) :: b + INTEGER,INTENT(IN) :: k + INTEGER :: q, pk + + !call dblepr('Pivot elements are: ', -1, A(k:n,k), size(A(k:n,k))) + q = MAXLOC(ABS(A(k:n,k)),1) + !call intpr('', -1, q, 1) + IF (q > 1) THEN + pk = k-1+q + A(k:pk:pk-k,:) = A(pk:k:k-pk,:) + b(k:pk:pk-k) = b(pk:k:k-pk) + END IF + !call dblepr('Pivot elements are: ', -1, A(k:n,k), size(A(k:n,k))) + END SUBROUTINE pivot + +end module yasso + diff --git a/models/basgra/tests/testthat.R b/models/basgra/tests/testthat.R index fd4d8aec47b..7e40507ca0b 100644 --- a/models/basgra/tests/testthat.R +++ b/models/basgra/tests/testthat.R @@ -1,11 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- library(testthat) library(PEcAn.utils) diff --git a/models/basgra/tests/testthat/ic_with_yasso_pools.nc b/models/basgra/tests/testthat/ic_with_yasso_pools.nc new file mode 100644 index 00000000000..784705865ba Binary files /dev/null and b/models/basgra/tests/testthat/ic_with_yasso_pools.nc differ diff --git a/models/basgra/tests/testthat/ic_with_yasso_pools_and_met.nc b/models/basgra/tests/testthat/ic_with_yasso_pools_and_met.nc new file mode 100644 index 00000000000..b382689ad91 Binary files /dev/null and b/models/basgra/tests/testthat/ic_with_yasso_pools_and_met.nc differ diff --git a/models/basgra/tests/testthat/test.met.2019.nc b/models/basgra/tests/testthat/test.met.2019.nc new file mode 100644 index 00000000000..de40170147f Binary files /dev/null and b/models/basgra/tests/testthat/test.met.2019.nc differ diff --git a/models/basgra/tests/testthat/test.met2model.R b/models/basgra/tests/testthat/test.met2model.R deleted file mode 100644 index 566b76d2cb4..00000000000 --- a/models/basgra/tests/testthat/test.met2model.R +++ /dev/null @@ -1,18 +0,0 @@ -context("met2model") - -outfolder <- tempfile() -setup(dir.create(outfolder, showWarnings = FALSE)) -teardown(unlink(outfolder, recursive = TRUE)) - -test_that("Met conversion runs without error", { - skip("This is a template test that will not run. To run it, remove this `skip` call.") - nc_path <- system.file("test-data", "CRUNCEP.2000.nc", - package = "PEcAn.utils") - in.path <- dirname(nc_path) - in.prefix <- "CRUNCEP" - start_date <- "2000-01-01" - end_date <- "2000-12-31" - result <- met2model.MODEL(in.path, in.prefix, outfolder, start_date, end_date) - expect_s3_class(result, "data.frame") - expect_true(file.exists(result[["file"]][[1]])) -}) diff --git a/models/basgra/tests/testthat/test.run_BASGRA.R b/models/basgra/tests/testthat/test.run_BASGRA.R new file mode 100644 index 00000000000..64b0e43c035 --- /dev/null +++ b/models/basgra/tests/testthat/test.run_BASGRA.R @@ -0,0 +1,727 @@ +context("run_BASGRA") + +mktmpdir <- function(env = parent.frame()) { + # they now recommend this style instead of using setup() and teardown() + outfolder <- tempfile() + dir.create(outfolder, showWarnings = FALSE) + withr::defer(unlink(outfolder, recursive = TRUE), env) + outfolder +} + +write_harv_fert <- function(path_harv, path_fert) { + # write harvest and fertilize files + df_harvest <- data.frame( + year = 2019, + doy = 163, + CLAIV = 0.7 + ) + harvest_file <- path_harv # + write.csv(df_harvest, harvest_file, row.names=FALSE) + df_fertilize <- data.frame( + year = 2019, + doy = 128, + amount = 10.0 + ) + fertilize_file <- path_fert # + write.csv(df_fertilize, fertilize_file, row.names=FALSE) +} + +write_new_fert <- function(path_fert, which_type) { + if (which_type == 'mineral') { + df_fertilize <- data.frame( + year = 2019, + doy = 128, + Nmin = 10.0, + Norg = 0.0, + C_soluble = 0.0, + C_compost = 0.0 + ) + } else if (which_type == 'soluble') { + df_fertilize <- data.frame( + year = 2019, + doy = 128, + Nmin = 0.0, + Norg = 10.0, + C_soluble = 200.0, + C_compost = 0.0 + ) + } else if (which_type == 'compost') { + df_fertilize <- data.frame( + year = 2019, + doy = 128, + Nmin = 0.0, + Norg = 10.0, + C_soluble = 0.0, + 
C_compost = 200.0 + ) + } else if (which_type == 'invalid') { + df_fertilize <- data.frame( + year = 2019, + doy = 128, + Nmin = 6.5, + badstuff = 6.0 + ) + } + fertilize_file <- path_fert # + write.csv(df_fertilize, fertilize_file, row.names=FALSE) +} + +test_that('two harvests yield more than one', { + met_path <- 'test.met' + df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) + run_params <- setNames(df_params[,2], df_params[,1]) + outfolder <- mktmpdir() + fert_file <- file.path(outfolder, 'fertilize.csv') + write_new_fert(fert_file, 'mineral') + + harv_file <- file.path(outfolder, 'harvest.csv') + write.csv( + data.frame( + year = 2019, + doy = 125 + ), + harv_file, row.names=FALSE + ) + run_BASGRA( + met_path, run_params, harv_file, fert_file, + start_date = '2019-01-01', + end_date = '2019-12-31 23:00', + outdir = outfolder, + sitelat = 60.29, + sitelon = 22.39, # match the test meteo data file + write_raw_output = TRUE + ) + output_one_harv <- read.csv(file.path(outfolder, 'output_basgra.csv')) + + harv_file <- file.path(outfolder, 'harvest.csv') + write.csv( + data.frame( + year = c(2019, 2019), + doy = c(125, 165) + ), + harv_file, row.names=FALSE + ) + run_BASGRA( + met_path, run_params, harv_file, fert_file, + start_date = '2019-01-01', + end_date = '2019-12-31 23:00', + outdir = outfolder, + sitelat = 60.29, + sitelon = 22.39, # match the test meteo data file + write_raw_output = TRUE + ) + output_two_harv <- read.csv(file.path(outfolder, 'output_basgra.csv')) + expect_gt(sum(output_two_harv$YIELD), sum(output_one_harv$YIELD)) +}) + +test_that('harvest followed by cut yields same as only harvest but different mean LAI', { + met_path <- 'test.met' + df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) + run_params <- setNames(df_params[,2], df_params[,1]) + outfolder <- mktmpdir() + fert_file <- file.path(outfolder, 'fertilize.csv') + write_new_fert(fert_file, 'mineral') + + harv_file <- file.path(outfolder, 'harvest.csv') + write.csv( + data.frame( + year = 2019, + doy = 125, + CLAIV = 0.7 + ), + harv_file, row.names=FALSE + ) + run_BASGRA( + met_path, run_params, harv_file, fert_file, + start_date = '2019-01-01', + end_date = '2019-12-31 23:00', + outdir = outfolder, + sitelat = 60.29, + sitelon = 22.39, # match the test meteo data file + write_raw_output = TRUE + ) + output_only_harv <- read.csv(file.path(outfolder, 'output_basgra.csv')) + + harv_file <- file.path(outfolder, 'harvest.csv') + write.csv( + data.frame( + year = c(2019, 2019), + doy = c(125, 165), + CLAIV = c(0.7, 0.7), + cut_only = c(0, 1) + ), + harv_file, row.names=FALSE + ) + run_BASGRA( + met_path, run_params, harv_file, fert_file, + start_date = '2019-01-01', + end_date = '2019-12-31 23:00', + outdir = outfolder, + sitelat = 60.29, + sitelon = 22.39, # match the test meteo data file + write_raw_output = TRUE + ) + output_harv_cut <- read.csv(file.path(outfolder, 'output_basgra.csv')) + + expect_equal(sum(output_only_harv$YIELD), sum(output_harv_cut$YIELD)) + expect_equal(sum(output_only_harv$FHARVC), sum(output_harv_cut$FHARVC)) + expect_false(mean(output_only_harv$LAI) == mean(output_harv_cut$LAI)) +}) + +test_that('changing CLAIV changes LAI and yield', { + met_path <- 'test.met' + df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) + run_params <- setNames(df_params[,2], df_params[,1]) + outfolder <- mktmpdir() + fert_file <- file.path(outfolder, 'fertilize.csv') + write_new_fert(fert_file, 'mineral') + harv_file <- 
file.path(outfolder, 'harvest.csv') + write.csv( + data.frame( + year = 2019, + doy = 125, + CLAIV = 1.0 + ), + harv_file, row.names=FALSE + ) + run_BASGRA( + met_path, run_params, harv_file, fert_file, + start_date = '2019-01-01', + end_date = '2019-12-31 23:00', + outdir = outfolder, + sitelat = 60.29, + sitelon = 22.39, # match the test meteo data file + write_raw_output = TRUE + ) + output_high <- read.csv(file.path(outfolder, 'output_basgra.csv')) + + write.csv( + data.frame( + year = 2019, + doy = 125, + CLAIV = 0.001 + ), + harv_file, row.names=FALSE + ) + run_BASGRA( + met_path, run_params, harv_file, fert_file, + start_date = '2019-01-01', + end_date = '2019-12-31 23:00', + outdir = outfolder, + sitelat = 60.29, + sitelon = 22.39, # match the test meteo data file + write_raw_output = TRUE + ) + output_low <- read.csv(file.path(outfolder, 'output_basgra.csv')) + + expect_true(any(output_high$LAI > output_low$LAI)) + # for a single harvest, we expect a higher yield with lower CLAIV + expect_gt(sum(output_low$FHARVC), sum(output_high$FHARVC)) +}) + +test_that('invalid harvest file raises an error', { + met_path <- 'test.met' + df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) + run_params <- setNames(df_params[,2], df_params[,1]) + outfolder <- mktmpdir() + fert_file <- file.path(outfolder, 'fertilize.csv') + write_new_fert(fert_file, 'mineral') + harv_file <- file.path(outfolder, 'harvest.csv') + write.csv( + data.frame( + year = 2019, + doy = 125, + garbage = 1.0 + ), + harv_file, row.names=FALSE + ) + expect_error( + run_BASGRA( + met_path, run_params, harv_file, fert_file, + start_date = '2019-01-01', + end_date = '2019-12-31 23:00', + outdir = outfolder, + sitelat = 60.29, + sitelon = 22.39 # match the test meteo data file + ) + ) +}) + +test_that('model produces some output', { + met_path <- 'test.met' + df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) + run_params <- setNames(df_params[,2], df_params[,1]) + outfolder <- mktmpdir() + harvest_file <- file.path(outfolder, 'harvest.csv') + fertilize_file <- file.path(outfolder, 'fertilize.csv') + write_harv_fert(harvest_file, fertilize_file) + + run_BASGRA( + met_path, run_params, harvest_file, fertilize_file, + start_date = '2019-01-01', + end_date = '2019-12-31 23:00', + outdir = outfolder, + sitelat = 60.29, + sitelon = 22.39, # match the test meteo data file + write_raw_output = TRUE + ) + + output <- read.csv(file.path(outfolder, 'output_basgra.csv')) + expect_true(any(output$LAI > 0)) + expect_true(all(!is.na(output))) +}) + +test_that('Fertilizer C inputs are zeroed without Yasso', { + met_path <- 'test.met' + df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) + run_params <- setNames(df_params[,2], df_params[,1]) + + outfolder <- mktmpdir() + harvest_file <- file.path(outfolder, 'harvest.csv') + fert_file_mineral <- file.path(outfolder, 'fert.mineral.csv') + write_harv_fert(harvest_file, fert_file_mineral) + fert_file_soluble <- file.path(outfolder, 'fert.soluble.csv') + write_new_fert(fert_file_soluble, 'soluble') + run_BASGRA( + met_path, run_params, harvest_file, fert_file_soluble, + start_date = '2019-01-01', + end_date = '2019-12-31 23:00', + outdir = outfolder, + sitelat = 60.29, + sitelon = 22.39, # match the test meteo data file + write_raw_output = TRUE + ) + output <- read.csv(file.path(outfolder, 'output_basgra.csv')) + expect_true(all(output$FSOILAMDC == 0.0)) +}) + +test_that('Fertilizer C inputs work consistently with 
Yasso', { + met_path <- 'test.met' + df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) + df_params[df_params[,1] == 'use_yasso', 2] <- 1 + df_params[df_params[,1] == 'use_nitrogen', 2] <- 0 + + run_params <- setNames(df_params[,2], df_params[,1]) + + outfolder <- mktmpdir() + harvest_file <- file.path(outfolder, 'harvest.csv') + fert_file_mineral <- file.path(outfolder, 'fert.mineral.csv') + write_harv_fert(harvest_file, fert_file_mineral) + + run_BASGRA( + met_path, run_params, harvest_file, fert_file_mineral, + start_date = '2019-01-01', + end_date = '2019-12-31 23:00', + outdir = outfolder, + sitelat = 60.29, + sitelon = 22.39, # match the test meteo data file + write_raw_output = TRUE + ) + output_mineral <- read.csv(file.path(outfolder, 'output_basgra.csv')) + + fert_file_soluble <- file.path(outfolder, 'fert.soluble.csv') + write_new_fert(fert_file_soluble, 'soluble') + run_BASGRA( + met_path, run_params, harvest_file, fert_file_soluble, + start_date = '2019-01-01', + end_date = '2019-12-31 23:00', + outdir = outfolder, + sitelat = 60.29, + sitelon = 22.39, # match the test meteo data file + write_raw_output = TRUE + ) + output_soluble <- read.csv(file.path(outfolder, 'output_basgra.csv')) + + expect_true(all(output_soluble$CSOM_W >= output_mineral$CSOM_W)) + expect_true(any(output_soluble$CSOM_W > output_mineral$CSOM_W)) + expect_equal(sum(output_soluble$FSOILAMDC), 200.0) + + fert_file_compost <- file.path(outfolder, 'fert.compost.csv') + write_new_fert(fert_file_compost, 'compost') + run_BASGRA( + met_path, run_params, harvest_file, fert_file_compost, + start_date = '2019-01-01', + end_date = '2019-12-31 23:00', + outdir = outfolder, + sitelat = 60.29, + sitelon = 22.39, # match the test meteo data file + write_raw_output = TRUE + ) + output_compost <- read.csv(file.path(outfolder, 'output_basgra.csv')) + expect_true(all(output_compost$CSOM_A >= output_mineral$CSOM_A)) + expect_true(any(output_compost$CSOM_A > output_mineral$CSOM_A)) + expect_true(all(output_compost$CSOM_N >= output_mineral$CSOM_N)) + expect_true(any(output_compost$CSOM_N > output_mineral$CSOM_N)) + + expect_true(all(output_compost$CSOM_A >= output_soluble$CSOM_A)) + expect_true(any(output_compost$CSOM_A > output_soluble$CSOM_A)) + + expect_equal(sum(output_compost$FSOILAMDC), 200.0) + + fert_file_bad <- file.path(outfolder, 'fert.bad.csv') + write_new_fert(fert_file_bad, 'invalid') + expect_error( + run_BASGRA( + met_path, run_params, harvest_file, fert_file_bad, + start_date = '2019-01-01', + end_date = '2019-12-31 23:00', + outdir = outfolder, + sitelat = 60.29, + sitelon = 22.39 # match the test meteo data file + ) + ) +}) + + +test_that('new fertilization file format matches the old', { + met_path <- 'test.met' + df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) + run_params_basic <- setNames(df_params[,2], df_params[,1]) + + df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) + df_params[df_params[,1] == 'use_yasso', 2] <- 1 + df_params[df_params[,1] == 'use_nitrogen', 2] <- 0 + run_params_yasso <- setNames(df_params[,2], df_params[,1]) + + outfolder <- mktmpdir() + harvest_file <- file.path(outfolder, 'harvest.csv') + fert_file_old <- file.path(outfolder, 'fert.old.csv') + write_harv_fert(harvest_file, fert_file_old) + fert_file_mineral <- file.path(outfolder, 'fert.mineral.csv') + write_new_fert(fert_file_mineral, 'mineral') + + run_BASGRA( + met_path, run_params_basic, harvest_file, fert_file_old, + start_date = 
'2019-01-01', + end_date = '2019-12-31 23:00', + outdir = outfolder, + sitelat = 60.29, + sitelon = 22.39, # match the test meteo data file + write_raw_output = TRUE + ) + output_old_fert <- read.csv(file.path(outfolder, 'output_basgra.csv')) + run_BASGRA( + met_path, run_params_basic, harvest_file, fert_file_mineral, + start_date = '2019-01-01', + end_date = '2019-12-31 23:00', + outdir = outfolder, + sitelat = 60.29, + sitelon = 22.39, # match the test meteo data file + write_raw_output = TRUE + ) + output_mineral <- read.csv(file.path(outfolder, 'output_basgra.csv')) + expect_equal(output_old_fert, output_mineral) +}) + +test_that('model shows no nitrogen limitation when run with use_nitrogen = 0', { + met_path <- 'test.met' + #df_params <- read.csv('BASGRA_params_no_nitrogen.csv') + df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) + df_params[df_params[,1] == 'use_nitrogen', 2] <- 0 + run_params <- setNames(df_params[,2], df_params[,1]) + outfolder <- mktmpdir() + harvest_file <- file.path(outfolder, 'harvest.csv') + fertilize_file <- file.path(outfolder, 'fertilize.csv') + write_harv_fert(harvest_file, fertilize_file) + + run_BASGRA( + met_path, run_params, harvest_file, fertilize_file, + start_date = '2019-01-01', + end_date = '2019-12-31 23:00', + outdir = outfolder, + sitelat = 60.29, + sitelon = 22.39, # match the test meteo data file + write_raw_output = TRUE + ) + + output <- read.csv(file.path(outfolder, 'output_basgra.csv')) + expect_equal(output$fNgrowth, rep(1.0, 365)) # if fNgrowth == 1 growth is not N-limited +}) + +## test_that('model crashes when run with use_yasso = 1 and use_nitrogen = 1', { +## met_path <- 'test.met' +## #df_params <- read.csv('BASGRA_params_yasso_use_nitrogen.csv') +## df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) +## df_params[df_params[,1] == 'use_yasso', 2] <- 1 +## run_params <- setNames(df_params[,2], df_params[,1]) +## outfolder <- mktmpdir() +## harvest_file <- file.path(outfolder, 'harvest.csv') +## fertilize_file <- file.path(outfolder, 'fertilize.csv') +## write_harv_fert(harvest_file, fertilize_file) + +## # Now run_BASGRA should crash with ERROR STOP from fortran. We can't test this by +## # calling run_BASGRA directly (because R would exit) so instead we save it and load into +## # a child process. I'm open for more elegant solutions... 
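+## # A possibly more elegant alternative (untested sketch, not part of the
+## # original test; it assumes the callr package were available): run the
+## # wrapper in a throwaway child R session and let testthat catch the
+## # child's failure:
+## #   expect_error(callr::r(run_wrapper))
+## # callr serializes run_wrapper together with its enclosing environment, so
+## # the Fortran ERROR STOP would only bring down the child process, not the
+## # test session.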
+## run_wrapper <- function() { +## dyn.load("../../src/PEcAn.BASGRA.so") +## run_BASGRA( +## met_path, run_params, harvest_file, fertilize_file, +## start_date = '2019-01-01', +## end_date = '2019-12-31 23:00', +## outdir = outfolder, +## sitelat = 60.29, +## sitelon = 22.39 # match the test meteo data file +## ) +## } +## wrap_file <- file.path(outfolder, 'rwrp') +## save(run_wrapper, run_BASGRA, file=wrap_file) +## # trying to capture the output, bit of a kludge but maybe works on linux and mac +## suppressWarnings(messages <- system(sprintf('echo "load(\\"%s\\"); run_wrapper()"|R --vanilla 2>&1', wrap_file), intern=TRUE)) +## expect_true(length(grep('ERROR STOP', messages)) > 0) +## expect_false(file.exists(file.path(outfolder, 'output_basgra.csv'))) +## }) + +test_that('model produces reasonable yasso-specific output when use_yasso = 1 and use_nitrogen = 0', { + met_path <- 'test.met' + df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) + df_params[df_params[,1] == 'use_yasso', 2] <- 1 + df_params[df_params[,1] == 'use_nitrogen', 2] <- 0 + run_params <- setNames(df_params[,2], df_params[,1]) + outfolder <- mktmpdir() + harvest_file <- file.path(outfolder, 'harvest.csv') + fertilize_file <- file.path(outfolder, 'fertilize.csv') + write_harv_fert(harvest_file, fertilize_file) + run_BASGRA( + met_path, run_params, harvest_file, fertilize_file, + start_date = '2019-01-01', + end_date = '2019-12-31 23:00', + outdir = outfolder, + sitelat = 60.29, + sitelon = 22.39, # match the test meteo data file + write_raw_output = TRUE + ) + output <- read.csv(file.path(outfolder, 'output_basgra.csv')) + expect_true(all(output[,c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N','CSOM_H','NSOM')] > 0)) + expect_true(all(output[,c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N','CSOM_H','NSOM')] < 1e6)) + expect_true(all(!is.na(output))) +}) + +test_that('model produces reasonable yasso-specific output when use_yasso = 1 and use_nitrogen = 1', { + met_path <- 'test.met' + df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) + df_params[df_params[,1] == 'use_yasso', 2] <- 1 + df_params[df_params[,1] == 'use_nitrogen', 2] <- 1 + run_params <- setNames(df_params[,2], df_params[,1]) + outfolder <- mktmpdir() + harvest_file <- file.path(outfolder, 'harvest.csv') + fertilize_file <- file.path(outfolder, 'fertilize.csv') + write_harv_fert(harvest_file, fertilize_file) + run_BASGRA( + met_path, run_params, harvest_file, fertilize_file, + start_date = '2019-01-01', + end_date = '2019-12-31 23:00', + outdir = outfolder, + sitelat = 60.29, + sitelon = 22.39, # match the test meteo data file + write_raw_output = TRUE + ) + output <- read.csv(file.path(outfolder, 'output_basgra.csv')) + expect_true(all(output[,c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N','CSOM_H','NSOM')] > 0)) + expect_true(all(output[,c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N','CSOM_H','NSOM')] < 1e6)) + expect_true(all(output[,'Nmineralisation'] > 0)) + expect_true(all(output[,'NMIN'] >= 0)) + expect_true(all(!is.na(output))) +}) + +test_that('NSH is positive', { + met_path <- 'test.met' + df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) + df_params[df_params[,1] == 'use_yasso', 2] <- 1 + df_params[df_params[,1] == 'use_nitrogen', 2] <- 1 + df_params[df_params[,1] == 'NMIN0', 2] <- 0.0 + run_params <- setNames(df_params[,2], df_params[,1]) + outfolder <- mktmpdir() + + df_fertilize <- data.frame( + year = 2019, + doy = 128, + amount = 1e-6 + ) + no_fertilize_file <- file.path(outfolder, 
'no-fertilize.csv') + write.csv(df_fertilize, no_fertilize_file, row.names=FALSE) + + harvest_file <- file.path(outfolder, 'harvest.csv') + fertilize_file <- file.path(outfolder, 'fertilize.csv') + write_harv_fert(harvest_file, fertilize_file) + run_BASGRA( + met_path, run_params, harvest_file, no_fertilize_file, + start_date = '2019-01-01', + end_date = '2019-12-31 23:00', + outdir = outfolder, + sitelat = 60.29, + sitelon = 22.39, # match the test meteo data file + write_raw_output = TRUE + ) + output <- read.csv(file.path(outfolder, 'output_basgra.csv')) + expect_true(all(output[, 'NSH'] > 0)) +}) + + +test_that('Netcdf output is consistent with the raw output for certain variables', { + met_path <- 'test.met' + df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) + df_params[df_params[,1] == 'use_yasso', 2] <- 1 + df_params[df_params[,1] == 'use_nitrogen', 2] <- 0 + run_params <- setNames(df_params[,2], df_params[,1]) + outfolder <- mktmpdir() + harvest_file <- file.path(outfolder, 'harvest.csv') + fertilize_file <- file.path(outfolder, 'fertilize.csv') + write_harv_fert(harvest_file, fertilize_file) + run_BASGRA( + met_path, run_params, harvest_file, fertilize_file, + start_date = '2019-01-01', + end_date = '2019-12-31 23:00', + outdir = outfolder, + sitelat = 60.29, + sitelon = 22.39, # match the test meteo data file + write_raw_output = TRUE + ) + output.raw <- read.csv(file.path(outfolder, 'output_basgra.csv')) + nc <- ncdf4::nc_open(file.path(outfolder, '2019.nc')) + + fastc_nc <- ncdf4::ncvar_get(nc, 'fast_soil_pool_carbon_content') + fastc_raw <- rowSums(output.raw[,c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N')]) + expect_equal(as.vector(fastc_nc), as.vector(fastc_raw*1e-3)) # g vs kg + + slowc_nc <- ncdf4::ncvar_get(nc, 'slow_soil_pool_carbon_content') + slowc_raw <- output.raw[,'CSOM_H'] + expect_equal(as.vector(slowc_nc), as.vector(slowc_raw*1e-3)) + + totc_nc <- ncdf4::ncvar_get(nc, 'TotSoilCarb') + totc_raw <- rowSums(output.raw[, c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N', 'CSOM_H')]) + expect_equal(as.vector(totc_nc), as.vector(totc_raw*1e-3)) +}) + +test_that('The yasso_rate_pc parameter has a reasonable effect', { + met_path <- 'test.met' + df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) + df_params[df_params[,1] == 'use_yasso', 2] <- 1 + df_params[df_params[,1] == 'use_nitrogen', 2] <- 0 + run_params <- setNames(df_params[,2], df_params[,1]) + outfolder <- mktmpdir() + harvest_file <- file.path(outfolder, 'harvest.csv') + fertilize_file <- file.path(outfolder, 'fertilize.csv') + write_harv_fert(harvest_file, fertilize_file) + run_BASGRA( + met_path, run_params, harvest_file, fertilize_file, + start_date = '2019-01-01', + end_date = '2019-12-31 23:00', + outdir = outfolder, + sitelat = 60.29, + sitelon = 22.39, # match the test meteo data file + write_raw_output = TRUE + ) + output <- read.csv(file.path(outfolder, 'output_basgra.csv')) + + run_params_mod <- run_params + run_params_mod['yasso_rate_pc'] <- -1.0 # the component has negative values so this speeds up the decomp + run_BASGRA( + met_path, run_params_mod, harvest_file, fertilize_file, + start_date = '2019-01-01', + end_date = '2019-12-31 23:00', + outdir = outfolder, + sitelat = 60.29, + sitelon = 22.39, # match the test meteo data file + write_raw_output = TRUE + ) + output_mod <- read.csv(file.path(outfolder, 'output_basgra.csv')) + + # One could have thought that output_mod should have a higher Rsoil. 
But it doesn't
+ # always, because the initial state also changes. Now we'll just test the
+ # initialization.
+ expect_gt(output_mod[1, 'CSOM_H'], output[1, 'CSOM_H'])
+ awen = c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N')
+ expect_lt(sum(output_mod[1, awen]), sum(output[1, awen]))
+})
+
+test_that('The yasso ICs are handled consistently', {
+ met_path <- 'test.met'
+ df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA'))
+ df_params[df_params[,1] == 'use_yasso', 2] <- 1
+ df_params[df_params[,1] == 'use_nitrogen', 2] <- 1
+
+ run_params <- setNames(df_params[,2], df_params[,1])
+ yasso_state <- c(
+ 849, 95, 51, 1092, 14298, 1536
+ )
+ run_params[c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N','CSOM_H','NSOM')] <- yasso_state
+ outfolder <- mktmpdir()
+ harvest_file <- file.path(outfolder, 'harvest.csv')
+ fertilize_file <- file.path(outfolder, 'fertilize.csv')
+ write_harv_fert(harvest_file, fertilize_file)
+ run_BASGRA(
+ met_path, run_params, harvest_file, fertilize_file,
+ start_date = '2019-01-01',
+ end_date = '2019-12-31 23:00',
+ outdir = outfolder,
+ sitelat = 60.29,
+ sitelon = 22.39, # match the test meteo data file
+ write_raw_output = TRUE
+ )
+ output <- read.csv(file.path(outfolder, 'output_basgra.csv'))
+ output_state = as.numeric(output[1,c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N','CSOM_H','NSOM')])
+ expect_equal(
+ output_state,
+ yasso_state,
+ tolerance=2 # needed because the output holds the end-of-timestep state
+ )
+})
+
+test_that('The yasso ICs are ignored if negative', {
+ met_path <- 'test.met'
+ df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA'))
+ df_params[df_params[,1] == 'use_yasso', 2] <- 1
+ df_params[df_params[,1] == 'use_nitrogen', 2] <- 1
+
+ run_params <- setNames(df_params[,2], df_params[,1])
+ yasso_state <- rep(-1, 6)
+ run_params[c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N','CSOM_H','NSOM')] <- yasso_state
+ outfolder <- mktmpdir()
+ harvest_file <- file.path(outfolder, 'harvest.csv')
+ fertilize_file <- file.path(outfolder, 'fertilize.csv')
+ write_harv_fert(harvest_file, fertilize_file)
+ run_BASGRA(
+ met_path, run_params, harvest_file, fertilize_file,
+ start_date = '2019-01-01',
+ end_date = '2019-12-31 23:00',
+ outdir = outfolder,
+ sitelat = 60.29,
+ sitelon = 22.39, # match the test meteo data file
+ write_raw_output = TRUE
+ )
+ output <- read.csv(file.path(outfolder, 'output_basgra.csv'))
+ output_state = as.numeric(output[1,c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N','CSOM_H','NSOM')])
+ expect_gt(sum(output_state), 1000)
+ # check that met values are reasonable from the defaults
+ output_met <- as.numeric(output[1,c('TEMPR30', 'PRECIP30')])
+ expect_equal(output_met[1], 10, tolerance=20)
+ expect_equal(output_met[2], 1, tolerance=5)
+})
+
+test_that('The smoothed tempr and precip are read from params', {
+ met_path <- 'test.met'
+ df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA'))
+ df_params[df_params[,1] == 'use_yasso', 2] <- 1
+ df_params[df_params[,1] == 'use_nitrogen', 2] <- 0
+
+ run_params <- setNames(df_params[,2], df_params[,1])
+ met_values = c(-1e5, 1e5)
+ run_params[c('TEMPR30', 'PRECIP30')] <- met_values
+ outfolder <- mktmpdir()
+ harvest_file <- file.path(outfolder, 'harvest.csv')
+ fertilize_file <- file.path(outfolder, 'fertilize.csv')
+ write_harv_fert(harvest_file, fertilize_file)
+ run_BASGRA(
+ met_path, run_params, harvest_file, fertilize_file,
+ start_date = '2019-01-01',
+ end_date = '2019-12-31 23:00',
+ outdir = outfolder,
+ sitelat = 60.29,
+ sitelon = 22.39, # match the test meteo data file
+ write_raw_output = TRUE
+ )
+ output <- read.csv(file.path(outfolder, 'output_basgra.csv'))
+ output_values <- as.numeric(output[1,c('TEMPR30', 'PRECIP30')])
+ expect_lt(output_values[1], -1e2)
+ expect_gt(output_values[2], 1e2)
+})
diff --git a/models/basgra/tests/testthat/test.write.config.R b/models/basgra/tests/testthat/test.write.config.R
new file mode 100644
index 00000000000..e467afbcf2b
--- /dev/null
+++ b/models/basgra/tests/testthat/test.write.config.R
@@ -0,0 +1,218 @@
+context("write.config")
+
+outfolder <- tempfile()
+setup(dir.create(outfolder, showWarnings = FALSE))
+teardown(unlink(outfolder, recursive = TRUE))
+
+basesettings <- list(
+ rundir = outfolder,
+ host = list(
+ rundir=outfolder,
+ outdir=outfolder
+ ),
+ run = list(
+ site = list(
+ lat = 58.0, # must match the item in BASGRA_params.csv
+ lon = 25.0
+ ),
+ inputs = list(
+ met = list(
+ path = 'dummy'
+ ),
+ harvest = list(
+ path = 'dummy'
+ ),
+ fertilize = list(
+ path = 'dummy'
+ )
+ ),
+ start.date = 'start_date',
+ end.date = 'end_date'
+ ),
+ model = list()
+
+)
+
+
+create_job_template <- function(content) {
+ # write.config has a handy feature to override the default job.template.
+ # We'll use this for testing individual items in it.
+ filename <- file.path(outfolder, 'job.template')
+ write(content, filename)
+ filename
+}
+
+test_that('write.config retrieves default parameters from the file', {
+ jobtemplate <- create_job_template('@RUN_PARAMS@')
+ settings <- basesettings
+ settings$model$jobtemplate <- jobtemplate
+ trait.values = list(list()) # no traits given
+ params.from.file <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA'), col.names=c('name', 'value'))
+ defaults <- NULL
+ run.id <- 9999
+ dir.create(file.path(outfolder, run.id))
+ write.config.BASGRA(defaults, trait.values, settings, run.id)
+ job.file <- file.path(outfolder, run.id, 'job.sh')
+ content <- paste(readLines(job.file), collapse='\n')
+ param.vector <- eval(parse(text=content))
+ expect_equal(params.from.file$name, names(param.vector))
+ # A few of the parameters are redundant and get reset by write.config based on the other parameters.
+ # These parameters need to be set consistently in the default parameter file or otherwise this test fails.
+ expect_equal(params.from.file$value, setNames(param.vector, NULL), tolerance=1e-4)
+})
+
+test_that('default param path from settings overrides the global default', {
+ jobtemplate <- create_job_template('@RUN_PARAMS@')
+ settings <- basesettings
+ settings$model$jobtemplate <- jobtemplate
+ trait.values = list(list()) # no traits given
+ param_path <- file.path(outfolder, 'modified.defaults.csv')
+ df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA'), col.names=c('name', 'value'))
+ df_params[df_params$name == 'NMIN0', 'value'] <- -9991
+ write.csv(df_params, param_path, row.names=FALSE)
+ settings$run$inputs$defaults$path <- param_path
+ run.id = 9998
+ dir.create(file.path(outfolder, run.id))
+ write.config.BASGRA(defaults, trait.values, settings, run.id)
+ job.file <- file.path(outfolder, run.id, 'job.sh')
+ content <- paste(readLines(job.file), collapse='\n')
+ param.vector <- eval(parse(text=content))
+ expect_equal(setNames(param.vector['NMIN0'], NULL), -9991)
+})
+
+test_that('write.config modifies some trait values', {
+ jobtemplate <- create_job_template('@RUN_PARAMS@')
+ settings <- basesettings
+ settings$model$jobtemplate <- jobtemplate
+ trait.values = list(
+ list(
+ c2n_fineroot = 50.0,
+ leaf_width = 6.1
+ )
+ )
+ defaults <- NULL
+ run.id <- 9999
+ dir.create(file.path(outfolder, run.id), showWarnings = FALSE)
+ write.config.BASGRA(defaults, trait.values, settings, run.id)
+ job.file <- file.path(outfolder, run.id, 'job.sh')
+ content <- paste(readLines(job.file), collapse='\n')
+ param.vector <- eval(parse(text=content))
+ expect_equal(param.vector['NCR'], c(NCR = 0.02))
+ expect_equal(param.vector['LFWIDV'], c(LFWIDV = 6.1e-3)) # in meters
+})
+
+test_that('the force column in defaults.csv keeps the default parameters even if PEcAn provides trait values', {
+ jobtemplate <- create_job_template('@RUN_PARAMS@')
+ settings <- basesettings
+ settings$model$jobtemplate <- jobtemplate
+ trait.values = list(
+ list(
+ c2n_fineroot = 50.0,
+ leaf_width = 6.1
+ )
+ )
+ param_path <- file.path(outfolder, 'modified.defaults.csv')
+ df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA'), col.names=c('name', 'value'))
+ df_params$force = rep(FALSE, nrow(df_params))
+ df_params[df_params$name == 'LFWIDV', 'force'] <- TRUE
+ leaf_width_value <- df_params[df_params$name=='LFWIDV', 'value']
+ write.csv(df_params, param_path, row.names=FALSE)
+ settings$run$inputs$defaults$path <- param_path
+ run.id = 9998
+ write.config.BASGRA(defaults, trait.values, settings, run.id)
+ job.file <- file.path(outfolder, run.id, 'job.sh')
+ content <- paste(readLines(job.file), collapse='\n')
+ param.vector <- eval(parse(text=content))
+ expect_equal(param.vector['LFWIDV'], c(LFWIDV=leaf_width_value)) # default value
+ expect_equal(param.vector['NCR'], c(NCR=0.02)) # trait value
+})
+
+test_that('the force column values are interpreted flexibly', {
+ jobtemplate <- create_job_template('@RUN_PARAMS@')
+ settings <- basesettings
+ settings$model$jobtemplate <- jobtemplate
+ param_path <- file.path(outfolder, 'modified.defaults.csv')
+ df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA'), col.names=c('name', 'value'))
+ settings$run$inputs$defaults$path <- param_path
+ run.id = 9998
+ flagvalue <- -999999
+ trait.values <- list(list(leaf_width = 6.1))
+ job.file <- file.path(outfolder, run.id, 'job.sh')
+ df_params[,2] <- flagvalue
+
+ df_params$force = rep('True', nrow(df_params))
+ write.csv(df_params, param_path, row.names=FALSE)
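+ # With every default set to the flag value and force given as the string
+ # 'True' (which should be read as truthy), the run below is expected to
+ # return the flag value for every parameter; the two blocks after it repeat
+ # the check with the numeric codes 1 (truthy) and 0 (falsy).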
+ write.config.BASGRA(defaults, trait.values, settings, run.id)
+ content <- paste(readLines(job.file), collapse='\n')
+ param.vector <- setNames(eval(parse(text=content)), NULL)
+ expect_equal(length(param.vector), nrow(df_params))
+ expect_equal(param.vector, rep(flagvalue, length(param.vector)))
+
+ df_params$force = rep(1, nrow(df_params))
+ write.csv(df_params, param_path, row.names=FALSE)
+ write.config.BASGRA(defaults, trait.values, settings, run.id)
+ content <- paste(readLines(job.file), collapse='\n')
+ param.vector <- setNames(eval(parse(text=content)), NULL)
+ expect_equal(length(param.vector), nrow(df_params))
+ expect_true(all(param.vector == flagvalue))
+
+ df_params$force = rep(0, nrow(df_params))
+ write.csv(df_params, param_path, row.names=FALSE)
+ write.config.BASGRA(defaults, trait.values, settings, run.id)
+ content <- paste(readLines(job.file), collapse='\n')
+ param.vector <- eval(parse(text=content))
+ expect_equal(length(param.vector), nrow(df_params))
+ expect_equal(param.vector['LFWIDV'], c(LFWIDV=6.1*1e-3)) # converted to meters
+})
+
+test_that('YASSO pool ICs pass thru (list)', {
+ jobtemplate <- create_job_template('@RUN_PARAMS@')
+ settings <- basesettings
+ settings$model$jobtemplate <- jobtemplate
+ defaults <- NULL
+ run.id <- 9999
+ dir.create(file.path(outfolder, run.id), showWarnings = FALSE)
+ load(system.file('last_vals_basgra.Rdata', package='PEcAn.BASGRA'))
+ ic_list <- list(
+ CSOM_A = 1,
+ CSOM_W = 2,
+ CSOM_E = 3,
+ CSOM_N = 4,
+ CSOM_H = 5,
+ NSOM = 6,
+ TEMPR30 = 7,
+ PRECIP30 = 8,
+ test_vals = last_vals
+ )
+ write.config.BASGRA(defaults, trait.values=list(), settings=settings, run.id=run.id, IC=ic_list)
+ job.file <- file.path(outfolder, run.id, 'job.sh')
+ content <- paste(readLines(job.file), collapse='\n')
+ param.vector <- eval(parse(text=content))
+ state <- param.vector[c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N', 'CSOM_H', 'NSOM', 'TEMPR30', 'PRECIP30')]
+ expect_equal(setNames(state, NULL), seq(8))
+})
+
+test_that('YASSO pool ICs pass thru (file)', {
+ jobtemplate <- create_job_template('@RUN_PARAMS@')
+ settings <- basesettings
+ settings$model$jobtemplate <- jobtemplate
+ settings$run$inputs$poolinitcond = list(
+ path='ic_with_yasso_pools_and_met.nc'
+ )
+ defaults <- NULL
+ run.id <- 9999
+ dir.create(file.path(outfolder, run.id), showWarnings = FALSE)
+ write.config.BASGRA(defaults, trait.values=list(), settings=settings, run.id=run.id)
+ job.file <- file.path(outfolder, run.id, 'job.sh')
+ content <- paste(readLines(job.file), collapse='\n')
+ param.vector <- eval(parse(text=content))
+ state <- param.vector[c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N', 'CSOM_H', 'NSOM', 'TEMPR30', 'PRECIP30')]
+ correct_state <- c(
+ 1011.55245115532, 118.194058863007, 62.5131705827862, 1153.2435021838, 14274.4980088834, 1549.22075041662,
+ 12.0709309808298, 1.28496155077734
+ )
+ expect_equal(setNames(state, NULL), correct_state)
+})
+
+
diff --git a/models/biocro/.Rbuildignore b/models/biocro/.Rbuildignore
new file mode 100644
index 00000000000..2d28facae40
--- /dev/null
+++ b/models/biocro/.Rbuildignore
@@ -0,0 +1,2 @@
+Dockerfile
+model_info.json
diff --git a/models/biocro/DESCRIPTION b/models/biocro/DESCRIPTION
index 1d7c35ee7fd..51a2475e4dd 100644
--- a/models/biocro/DESCRIPTION
+++ b/models/biocro/DESCRIPTION
@@ -1,8 +1,7 @@
 Package: PEcAn.BIOCRO
 Type: Package
 Title: PEcAn Package for Integration of the BioCro Model
-Version: 1.7.2
-Date: 2021-10-04
+Version: 1.7.3.9000
 Authors@R: c(person("David", "LeBauer", role = c("aut", "cre"), email =
"dlebauer@email.arizona.edu"), person("Chris", "Black", role = c("aut"), @@ -28,15 +27,16 @@ Imports: rlang Suggests: BioCro, + knitr, testthat (>= 2.0.0), mockery (>= 0.3.0), PEcAn.DB, + rmarkdown, RPostgreSQL Remotes: github::ebimodeling/biocro@0.951 License: BSD_3_clause + file LICENSE Copyright: Energy Biosciences Institute, Authors -LazyLoad: yes -LazyData: FALSE Encoding: UTF-8 -RoxygenNote: 7.2.3 +VignetteBuilder: knitr, rmarkdown +RoxygenNote: 7.3.2 diff --git a/models/biocro/LICENSE b/models/biocro/LICENSE index 5a9e44128f1..09ef35a60b4 100644 --- a/models/biocro/LICENSE +++ b/models/biocro/LICENSE @@ -1,34 +1,3 @@ -## This is the master copy of the PEcAn License - -University of Illinois/NCSA Open Source License - -Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - -PEcAn project -www.pecanproject.org - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal with the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -- Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. -- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. -- Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - +YEAR: 2024 +COPYRIGHT HOLDER: PEcAn Project +ORGANIZATION: PEcAn Project, authors affiliations diff --git a/models/biocro/NEWS.md b/models/biocro/NEWS.md new file mode 100644 index 00000000000..9947d8a8ac0 --- /dev/null +++ b/models/biocro/NEWS.md @@ -0,0 +1,7 @@ +# PEcAn.BIOCRO 1.7.3.9000 + +## License change +* PEcAn.BIOCRO is now distributed under the BSD three-clause license instead of the NCSA Open Source license. + +## Added +* Added a `NEWS.md` file to track changes to the package. Prior to this point changes are tracked in the main CHANGELOG for the PEcAn repository. 
diff --git a/models/biocro/R/get_biocro_defaults.R b/models/biocro/R/get_biocro_defaults.R
index c470c80967d..ac729d90511 100644
--- a/models/biocro/R/get_biocro_defaults.R
+++ b/models/biocro/R/get_biocro_defaults.R
@@ -13,7 +13,7 @@ from_bc <- function(dfname){
 #' *_initial_values, *_parameters, *_modules
 #'
 #' @param genus Name of the genus (or really any string BioCro uses as a *_parameters prefix)
-#' @return a list in the format expected by \code{BioCro::\link[BioCro:Gro]{Gro}},
+#' @return a list in the format expected by `BioCro::Gro()`,
 #' containing four lists named `type`, `initial_values`, `parameters`, and `modules`,
 #' or NULL if genus not found
 #' @export
diff --git a/models/biocro/R/met2model.BIOCRO.R b/models/biocro/R/met2model.BIOCRO.R
index 638a1c7110b..9e993e59a6b 100644
--- a/models/biocro/R/met2model.BIOCRO.R
+++ b/models/biocro/R/met2model.BIOCRO.R
@@ -1,29 +1,23 @@
-#-------------------------------------------------------------------------------
-# Copyright (c) 2012 University of Illinois, NCSA.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the
-# University of Illinois/NCSA Open Source License
-# which accompanies this distribution, and is available at
-# http://opensource.ncsa.illinois.edu/license.html
-#-------------------------------------------------------------------------------
 .datatable.aware <- TRUE
+
+
 ##-------------------------------------------------------------------------------------------------#
-##' Converts a met CF file to a model specific met file. The input
-##' files are called <in.path>/<in.prefix>.YYYY.cf
-##'
-##' @name met2model.BIOCRO
-##' @title Write BioCro met files
-##' @param in.path path on disk where CF file lives
-##' @param in.prefix prefix for each file
-##' @param outfolder location where model specific output is written
-##' @param lat,lon Site latitude and longitude
-##' @param start_date,end_date Date range to convert. Each year will be written to a separate file
-##' @param overwrite logical: Write over any existing file of the same name? If FALSE, leaves the existing file untouched and skips to the next year.
-##' @param ... other arguments passed from PEcAn, currently ignored
-##' @return a dataframe of information about the written file
-##' @export
-##' @author Rob Kooper, David LeBauer
+#' Write BioCro met files
+#'
+#' Converts a met CF file to a model specific met file. The input
+#' files are called <in.path>/<in.prefix>.YYYY.cf
+#'
+#' @param in.path path on disk where CF file lives
+#' @param in.prefix prefix for each file
+#' @param outfolder location where model specific output is written
+#' @param lat,lon Site latitude and longitude
+#' @param start_date,end_date Date range to convert. Each year will be written to a separate file
+#' @param overwrite logical: Write over any existing file of the same name? If FALSE, leaves the existing file untouched and skips to the next year.
+#' @param ... other arguments passed from PEcAn, currently ignored
+#' @return a dataframe of information about the written file
+#' @export
+#' @author Rob Kooper, David LeBauer
 ##-------------------------------------------------------------------------------------------------#
 met2model.BIOCRO <- function(in.path, in.prefix, outfolder, overwrite = FALSE, lat, lon, start_date, end_date, ...)
{ @@ -102,46 +96,49 @@ met2model.BIOCRO <- function(in.path, in.prefix, outfolder, overwrite = FALSE, ##-------------------------------------------------------------------------------------------------# -##' Converts a CF data frame into a BioCro met input -##' -##' @name cf2biocro -##' @title Convert CF-formatted met data to BioCro met -##' @param met data.table object with met for a single site; output from \code{\link{load.cfmet}} -##' \itemize{ -##' \item {year} {int} -##' \item {month} {int} -##' \item {day} {int: day of month (1-31)} -##' \item {doy} {int: day of year (1-366)} -##' \item {hour} {int (0-23)} -##' \item {date} {YYYY-MM-DD HH:MM:SS POSIXct} -##' \item {wind_speed} {num m/s} -##' \item {northward_wind} -##' \item {eastward_wind} -##' \item {ppfd} {optional; if missing, requires surface_downwelling_shortwave_flux_in_air} -##' \item {surface_downwelling_shortwave_flux_in_air} -##' \item {air_pressure (Pa)} {optional; if missing, requires relative_humidity} -##' \item {specific_humidity} {optional; if missing, requires relative_humidity} -##' \item {relative_humidity} {optional; if missing, requires air_pressure and specific_humidity} -##' \item {precipitation_flux} -##' \item {air_temperature} -##' } -##' @param longitude in degrees east, used for calculating solar noon -##' @param zulu2solarnoon logical; if TRUE, convert time from GMT to local solar time. -##' @return data.table / data.frame with fields -##' \itemize{ -##' \item {doy} {day of year} -##' \item {hr} {hour} -##' \item {solar} {solar radiation (PPFD)} -##' \item {temp} {temperature, degrees celsius} -##' \item {rh} {relative humidity, as fraction (0-1)} -##' \item {windspeed} {m/s} -##' \item {precip} {cm/h} -##' } -##' @export cf2biocro -##' @importFrom data.table := -##' @author David LeBauer +#' Converts a CF data frame into a BioCro met input +#' +#' @param met data.table object with met for a single site; output from \code{\link{load.cfmet}} +#' \describe{ +#' \item{year}{int} +#' \item{month}{int} +#' \item{day}{int: day of month (1-31)} +#' \item{doy}{int: day of year (1-366)} +#' \item{hour}{int (0-23)} +#' \item{date}{YYYY-MM-DD HH:MM:SS POSIXct} +#' \item{wind_speed}{num m/s} +#' \item{northward_wind}{} +#' \item{eastward_wind}{} +#' \item{ppfd}{optional; if missing, requires surface_downwelling_shortwave_flux_in_air} +#' \item{surface_downwelling_shortwave_flux_in_air}{} +#' \item{air_pressure (Pa)}{optional; if missing, requires relative_humidity} +#' \item{specific_humidity}{optional; if missing, requires relative_humidity} +#' \item{relative_humidity}{optional; if missing, requires air_pressure and specific_humidity} +#' \item{precipitation_flux}{} +#' \item{air_temperature}{} +#' } +#' @param longitude in degrees east, used for calculating solar noon +#' @param zulu2solarnoon logical; if TRUE, convert time from GMT to local solar time. 
+#' @return data.table / data.frame with fields
+#' \describe{
+#' \item{doy}{day of year}
+#' \item{hr}{hour}
+#' \item{solar}{solar radiation (PPFD)}
+#' \item{temp}{temperature, degrees celsius}
+#' \item{rh}{relative humidity, as fraction (0-1)}
+#' \item{windspeed}{m/s}
+#' \item{precip}{cm/h}
+#' }
+#' @export
+#' @importFrom data.table :=
+#' @author David LeBauer
 cf2biocro <- function(met, longitude = NULL, zulu2solarnoon = FALSE) {
+ if (!data.table::is.data.table(met)) {
+ met <- data.table::copy(met)
+ data.table::setDT(met)
+ }
+
 if ((!is.null(longitude)) & zulu2solarnoon) {
 solarnoon_offset <- PEcAn.utils::ud_convert(longitude/360, "day", "minute")
 met[, `:=`(solardate = met$date + lubridate::minutes(solarnoon_offset))]
diff --git a/models/biocro/R/model2netcdf.BIOCRO.R b/models/biocro/R/model2netcdf.BIOCRO.R
index 3d8269266cc..d6fb4dbbff7 100644
--- a/models/biocro/R/model2netcdf.BIOCRO.R
+++ b/models/biocro/R/model2netcdf.BIOCRO.R
@@ -1,28 +1,16 @@
-#-------------------------------------------------------------------------------
-# Copyright (c) 2012 University of Illinois, NCSA.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the
-# University of Illinois/NCSA Open Source License
-# which accompanies this distribution, and is available at
-# http://opensource.ncsa.illinois.edu/license.html
-#-------------------------------------------------------------------------------
-
-##--------------------------------------------------------------------------------------------------#
-##' Convert BioCro output to netCDF
-##'
-##' Converts BioCro output to netCDF.
-##' Modified from on model2netcdf.SIPNET and model2netcdf.ED2 by
-##' @name model2netcdf.BIOCRO
-##' @title Function to convert biocro model output to standard netCDF format
-##' @param result a dataframe of model output to be converted
-##' @param genus character: What kind of plant was being simulated?
-##' Used to correct for some genus-specific differences in model output;
-##' Eventually that will be handled inside BioCro and this argument will be removed.
-##' @param outdir Location of model output
-##' @param lat Latitude of the site
-##' @param lon Longitude of the site
-##' @export
-##' @author David LeBauer, Deepak Jaiswal, Rob Kooper
+#' Convert BioCro output to standard netCDF format
+#'
+#' Modified from model2netcdf.SIPNET and model2netcdf.ED2.
+#'
+#' @param result a dataframe of model output to be converted
+#' @param genus character: What kind of plant was being simulated?
+#' Used to correct for some genus-specific differences in model output;
+#' Eventually that will be handled inside BioCro and this argument will be removed.
+#' @param outdir Location of model output +#' @param lat Latitude of the site +#' @param lon Longitude of the site +#' @export +#' @author David LeBauer, Deepak Jaiswal, Rob Kooper model2netcdf.BIOCRO <- function(result, genus = NULL, outdir, lat = -9999, lon = -9999) { if (!("hour" %in% colnames(result))) { @@ -91,7 +79,7 @@ model2netcdf.BIOCRO <- function(result, genus = NULL, outdir, lat = -9999, lon = nc <- ncdf4::nc_open(ncfile, write = TRUE) } else { nc <- ncdf4::nc_create(filename = file.path(outdir, paste0(yeari, ".nc")), vars = vars) - ncdf4::ncatt_put(nc, 0, "description", "This is an output from the BioCro Crop model generated by the model2netcdf.BIOCRO.R function in the PEcAn.BIOCRO package; see https://pecanproject.github.io/pecan-documentation/master/ for more information") + ncdf4::ncatt_put(nc, 0, "description", "This is an output from the BioCro Crop model generated by the model2netcdf.BIOCRO.R function in the PEcAn.BIOCRO package; see https://pecanproject.github.io/pecan-documentation/latest/ for more information") } varfile <- file(file.path(outdir, paste(yeari, "nc", "var", sep = ".")), "w") diff --git a/models/biocro/R/read.biocro.config.R b/models/biocro/R/read.biocro.config.R index 2a47069be15..e1a58b6b19f 100644 --- a/models/biocro/R/read.biocro.config.R +++ b/models/biocro/R/read.biocro.config.R @@ -1,10 +1,9 @@ -##' Read BioCro config file -##' -##' @title Read BioCro Config -##' @param config.file Path to XML file -##' @return list of run configuration parameters for PEcAn -##' @export -##' @author David LeBauer +#' Read BioCro config file +#' +#' @param config.file Path to XML file +#' @return list of run configuration parameters for PEcAn +#' @export +#' @author David LeBauer read.biocro.config <- function(config.file = "config.xml") { config <- XML::xmlToList(XML::xmlTreeParse(file = config.file, handlers = list(comment = function(x) { NULL }), diff --git a/models/biocro/R/version.R b/models/biocro/R/version.R new file mode 100644 index 00000000000..0e58d885272 --- /dev/null +++ b/models/biocro/R/version.R @@ -0,0 +1,3 @@ +# Set at package install time, used by pecan.all::pecan_version() +# to identify development versions of packages +.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown") diff --git a/models/biocro/R/write.configs.BIOCRO.R b/models/biocro/R/write.configs.BIOCRO.R index 6bbca20db36..b7b869376a6 100644 --- a/models/biocro/R/write.configs.BIOCRO.R +++ b/models/biocro/R/write.configs.BIOCRO.R @@ -1,25 +1,16 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. 
This program and the accompanying materials
-# are made available under the terms of the
-# University of Illinois/NCSA Open Source License
-# which accompanies this distribution, and is available at
-# http://opensource.ncsa.illinois.edu/license.html
-#-------------------------------------------------------------------------------
 PREFIX_XML <- "<?xml version=\"1.0\"?>\n\n"
-##' convert parameters from PEcAn database default units to biocro defaults
-##'
-##' Performs model specific unit conversions on a list of trait values,
-##' such as those provided to write.config
-##' @name convert.samples.BIOCRO
-##' @title Convert samples for biocro
-##' @param trait.samples a matrix or dataframe of samples from the trait distribution
-##' @param biocro_version numeric, but currently only checks whether version is less than 1.0
-##' @return dataframe with values transformed
-##' @export
-##' @author David LeBauer
+#' convert parameters from PEcAn database default units to biocro defaults
+#'
+#' Performs model specific unit conversions on a list of trait values,
+#' such as those provided to write.config
+#'
+#' @param trait.samples a matrix or dataframe of samples from the trait distribution
+#' @param biocro_version numeric, but currently only checks whether version is less than 1.0
+#' @return dataframe with values transformed
+#' @export
+#' @author David LeBauer
 convert.samples.BIOCRO <- function(trait.samples, biocro_version=1.0) {
 trait.samples <- as.data.frame(trait.samples)
@@ -64,19 +55,17 @@ convert.samples.BIOCRO <- function(trait.samples, biocro_version=1.0) {
 }
 # convert.samples.BIOCRO
-##' Writes a configuration files for the biocro model
-##'
-##' @name write.config.BIOCRO
-##' @title Write configuration files for the biocro model
-##' @param defaults named list with default model parameter values
-##' @param trait.values named list (or dataframe of trait values)
-##' can either be a data.frame or named list of traits, e.g.
-##' \code{data.frame(vmax = 1, b0 = 2)} or \code{list(vmax = 1, b0 = 2)}
-##' @param settings pecan settings file configured for BioCro
-##' @param run.id integer; a unique identifier for the run.
-##' @export
-##' @return nothing, writes configuration file as side effect
-##' @author David LeBauer
+#' Writes a configuration file for the biocro model
+#'
+#' @param defaults named list with default model parameter values
+#' @param trait.values named list (or dataframe of trait values)
+#' can either be a data.frame or named list of traits, e.g.
+#' \code{data.frame(vmax = 1, b0 = 2)} or \code{list(vmax = 1, b0 = 2)}
+#' @param settings pecan settings file configured for BioCro
+#' @param run.id integer; a unique identifier for the run.
+#' @export
+#' @return nothing, writes configuration file as side effect
+#' @author David LeBauer
 write.config.BIOCRO <- function(defaults = NULL, trait.values, settings, run.id) {
 ## find out where to write run/ouput
@@ -209,15 +198,13 @@ write.config.BIOCRO <- function(defaults = NULL, trait.values, settings, run.id) {
 }
 # write.config.BIOCRO
-##' Clear out previous config and parameter files.
-##'
-##' @name remove.config.BIOCRO
-##' @title Clear out previous biocro config and parameter files.
-##' @param main.outdir Primary PEcAn output directory (will be depreciated)
-##' @param settings PEcAn settings file
-##' @return nothing, removes config files as side effect
-##' @export
-##' @author Shawn Serbin, David LeBauer
+#' Clear out previous config and parameter files.
+#'
+#' @param main.outdir Primary PEcAn output directory (will be deprecated)
+#' @param settings PEcAn settings file
+#' @return nothing, removes config files as side effect
+#' @export
+#' @author Shawn Serbin, David LeBauer
 remove.config.BIOCRO <- function(main.outdir, settings) {
 ## Remove files on localhost
diff --git a/models/biocro/man/cf2biocro.Rd b/models/biocro/man/cf2biocro.Rd
index 975d23d8436..8999385451e 100644
--- a/models/biocro/man/cf2biocro.Rd
+++ b/models/biocro/man/cf2biocro.Rd
@@ -2,29 +2,29 @@
 % Please edit documentation in R/met2model.BIOCRO.R
 \name{cf2biocro}
 \alias{cf2biocro}
-\title{Convert CF-formatted met data to BioCro met}
+\title{Converts a CF data frame into a BioCro met input}
 \usage{
 cf2biocro(met, longitude = NULL, zulu2solarnoon = FALSE)
 }
 \arguments{
 \item{met}{data.table object with met for a single site; output from \code{\link{load.cfmet}}
-\itemize{
-\item {year} {int}
-\item {month} {int}
-\item {day} {int: day of month (1-31)}
-\item {doy} {int: day of year (1-366)}
-\item {hour} {int (0-23)}
-\item {date} {YYYY-MM-DD HH:MM:SS POSIXct}
-\item {wind_speed} {num m/s}
-\item {northward_wind}
-\item {eastward_wind}
-\item {ppfd} {optional; if missing, requires surface_downwelling_shortwave_flux_in_air}
-\item {surface_downwelling_shortwave_flux_in_air}
-\item {air_pressure (Pa)} {optional; if missing, requires relative_humidity}
-\item {specific_humidity} {optional; if missing, requires relative_humidity}
-\item {relative_humidity} {optional; if missing, requires air_pressure and specific_humidity}
-\item {precipitation_flux}
-\item {air_temperature}
+\describe{
+\item{year}{int}
+\item{month}{int}
+\item{day}{int: day of month (1-31)}
+\item{doy}{int: day of year (1-366)}
+\item{hour}{int (0-23)}
+\item{date}{YYYY-MM-DD HH:MM:SS POSIXct}
+\item{wind_speed}{num m/s}
+\item{northward_wind}{}
+\item{eastward_wind}{}
+\item{ppfd}{optional; if missing, requires surface_downwelling_shortwave_flux_in_air}
+\item{surface_downwelling_shortwave_flux_in_air}{}
+\item{air_pressure (Pa)}{optional; if missing, requires relative_humidity}
+\item{specific_humidity}{optional; if missing, requires relative_humidity}
+\item{relative_humidity}{optional; if missing, requires air_pressure and specific_humidity}
+\item{precipitation_flux}{}
+\item{air_temperature}{}
 }}
 \item{longitude}{in degrees east, used for calculating solar noon}
@@ -33,14 +33,14 @@ cf2biocro(met, longitude = NULL, zulu2solarnoon = FALSE)
 }
 \value{
 data.table / data.frame with fields
-\itemize{
-\item {doy} {day of year}
-\item {hr} {hour}
-\item {solar} {solar radiation (PPFD)}
-\item {temp} {temperature, degrees celsius}
-\item {rh} {relative humidity, as fraction (0-1)}
-\item {windspeed} {m/s}
-\item {precip} {cm/h}
+\describe{
+\item{doy}{day of year}
+\item{hr}{hour}
+\item{solar}{solar radiation (PPFD)}
+\item{temp}{temperature, degrees celsius}
+\item{rh}{relative humidity, as fraction (0-1)}
+\item{windspeed}{m/s}
+\item{precip}{cm/h}
 }
 }
 \description{
diff --git a/models/biocro/man/convert.samples.BIOCRO.Rd b/models/biocro/man/convert.samples.BIOCRO.Rd
index 8964f3284de..28f50af2967 100644
--- a/models/biocro/man/convert.samples.BIOCRO.Rd
+++ b/models/biocro/man/convert.samples.BIOCRO.Rd
@@ -2,7 +2,7 @@
 % Please edit documentation in R/write.configs.BIOCRO.R
 \name{convert.samples.BIOCRO}
 \alias{convert.samples.BIOCRO}
-\title{Convert samples for biocro}
+\title{convert parameters from PEcAn database default units to biocro defaults}
 \usage{
 convert.samples.BIOCRO(trait.samples, biocro_version = 1)
biocro_version = 1)
}
@@ -15,9 +15,6 @@ convert.samples.BIOCRO(trait.samples, biocro_version = 1)
dataframe with values transformed
}
\description{
-convert parameters from PEcAn database default units to biocro defaults
-}
-\details{
Performs model specific unit conversions on a list of trait values,
such as those provided to write.config
}
diff --git a/models/biocro/man/get_biocro_defaults.Rd b/models/biocro/man/get_biocro_defaults.Rd
index 7d39aa0f35b..75d628fac4f 100644
--- a/models/biocro/man/get_biocro_defaults.Rd
+++ b/models/biocro/man/get_biocro_defaults.Rd
@@ -10,7 +10,7 @@ get_biocro_defaults(genus)
\item{genus}{Name of the genus (or really any string BioCro uses as a *_parameters prefix)}
}
\value{
-a list in the format expected by \code{BioCro::\link[BioCro:Gro]{Gro}},
+a list in the format expected by `BioCro::Gro()`,
containing four lists named `type`, `initial_values`, `parameters`, and `modules`,
or NULL if genus not found
}
diff --git a/models/biocro/man/model2netcdf.BIOCRO.Rd b/models/biocro/man/model2netcdf.BIOCRO.Rd
index 236fe0879c0..acc489139f0 100644
--- a/models/biocro/man/model2netcdf.BIOCRO.Rd
+++ b/models/biocro/man/model2netcdf.BIOCRO.Rd
@@ -2,7 +2,7 @@
% Please edit documentation in R/model2netcdf.BIOCRO.R
\name{model2netcdf.BIOCRO}
\alias{model2netcdf.BIOCRO}
-\title{Function to convert biocro model output to standard netCDF format}
+\title{Convert BioCro output to standard netCDF format}
\usage{
model2netcdf.BIOCRO(result, genus = NULL, outdir, lat = -9999, lon = -9999)
}
@@ -20,10 +20,6 @@
Eventually that will be handled inside BioCro and this argument will be removed.
\item{lon}{Longitude of the site}
}
\description{
-Convert BioCro output to netCDF
-}
-\details{
-Converts BioCro output to netCDF.
Modified from on model2netcdf.SIPNET and model2netcdf.ED2 by
}
\author{
diff --git a/models/biocro/man/read.biocro.config.Rd b/models/biocro/man/read.biocro.config.Rd
index 6240c283018..2b4a5ac910f 100644
--- a/models/biocro/man/read.biocro.config.Rd
+++ b/models/biocro/man/read.biocro.config.Rd
@@ -2,7 +2,7 @@
% Please edit documentation in R/read.biocro.config.R
\name{read.biocro.config}
\alias{read.biocro.config}
-\title{Read BioCro Config}
+\title{Read BioCro config file}
\usage{
read.biocro.config(config.file = "config.xml")
}
diff --git a/models/biocro/man/remove.config.BIOCRO.Rd b/models/biocro/man/remove.config.BIOCRO.Rd
index 7ceb7aeed94..60b926e7980 100644
--- a/models/biocro/man/remove.config.BIOCRO.Rd
+++ b/models/biocro/man/remove.config.BIOCRO.Rd
@@ -2,7 +2,7 @@
% Please edit documentation in R/write.configs.BIOCRO.R
\name{remove.config.BIOCRO}
\alias{remove.config.BIOCRO}
-\title{Clear out previous biocro config and parameter files.}
+\title{Clear out previous config and parameter files.}
\usage{
remove.config.BIOCRO(main.outdir, settings)
}
diff --git a/models/biocro/man/write.config.BIOCRO.Rd b/models/biocro/man/write.config.BIOCRO.Rd
index 26505e69d8a..9856d784278 100644
--- a/models/biocro/man/write.config.BIOCRO.Rd
+++ b/models/biocro/man/write.config.BIOCRO.Rd
@@ -2,7 +2,7 @@
% Please edit documentation in R/write.configs.BIOCRO.R
\name{write.config.BIOCRO}
\alias{write.config.BIOCRO}
-\title{Write configuration files for the biocro model}
+\title{Writes configuration files for the biocro model}
\usage{
write.config.BIOCRO(defaults = NULL, trait.values, settings, run.id)
}
diff --git a/models/biocro/tests/Rcheck_reference.log b/models/biocro/tests/Rcheck_reference.log
index 20da0a9ad2a..31fb6ae89ff 100644
---
a/models/biocro/tests/Rcheck_reference.log +++ b/models/biocro/tests/Rcheck_reference.log @@ -12,43 +12,6 @@ Maintainer: ‘David LeBauer ’ New submission -License components with restrictions and base license permitting such: - BSD_3_clause + file LICENSE -File 'LICENSE': - ## This is the master copy of the PEcAn License - - University of Illinois/NCSA Open Source License - - Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - - PEcAn project - www.pecanproject.org - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal with the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. - - Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR - ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - Unknown, possibly misspelled, fields in DESCRIPTION: ‘Remotes’ @@ -58,7 +21,6 @@ Strong dependencies not in mainstream repositories: Suggests or Enhances not in mainstream repositories: BioCro, PEcAn.DB -The Date field is over a month old. * checking package namespace information ... OK * checking package dependencies ... OK * checking if this is a source package ... OK @@ -72,18 +34,8 @@ The Date field is over a month old. * checking installed package size ... OK * checking package directory ... OK * checking for future file timestamps ... OK -* checking DESCRIPTION meta-information ... NOTE -Author field differs from that derived from Authors@R - Author: ‘David LeBauer, Deepak Jaiswal, Christopher Black’ - Authors@R: ‘David LeBauer [aut, cre], Chris Black [aut], Deepak Jaiswal [aut], University of Illinois, NCSA [cph]’ - -Maintainer field differs from that derived from Authors@R - Maintainer: ‘David LeBauer ’ - Authors@R: ‘David LeBauer ’ - -* checking top-level files ... NOTE -Non-standard files/directories found at top level: - ‘Dockerfile’ ‘model_info.json’ +* checking DESCRIPTION meta-information ... OK +* checking top-level files ... OK * checking for left-over files ... OK * checking index information ... OK * checking package subdirectories ... OK @@ -120,10 +72,7 @@ See section 'Cross-references' in the 'Writing R Extensions' manual. * checking Rd \usage sections ... OK * checking Rd contents ... OK * checking for unstated dependencies in examples ... OK -* checking files in ‘vignettes’ ... 
WARNING -Files in the 'vignettes' directory but no files in 'inst/doc': - ‘C4grass_sa_vd.Rmd’, ‘sa.output.Rdata’, ‘workflow.R’, ‘workflow.Rmd’ -Package has no Sweave vignette sources and no VignetteBuilder field. +* checking files in ‘vignettes’ ... OK * checking examples ... NONE * checking for unstated dependencies in ‘tests’ ... OK * checking tests ... SKIPPED diff --git a/models/biocro/tests/testthat.R b/models/biocro/tests/testthat.R index 31a95f2fe7e..da34f056ebe 100644 --- a/models/biocro/tests/testthat.R +++ b/models/biocro/tests/testthat.R @@ -1,10 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. All rights reserved. This -# program and the accompanying materials are made available under the terms of -# the University of Illinois/NCSA Open Source License which accompanies this -# distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- library(PEcAn.utils) library(PEcAn.settings) library(testthat) diff --git a/models/biocro/vignettes/C4grass_sa_vd.Rmd b/models/biocro/vignettes/C4grass_sa_vd.Rmd index a248465c89d..5e9e4126e08 100644 --- a/models/biocro/vignettes/C4grass_sa_vd.Rmd +++ b/models/biocro/vignettes/C4grass_sa_vd.Rmd @@ -1,6 +1,13 @@ +--- +title: "Sensitivity analysis on C4 grass model" +output: html_vignette +vignette: > + %\VignetteIndexEntry{Sensitivity analysis on C4 grass model} + %\VignetteEngine{knitr::rmarkdown} +--- -```{r} +```{r eval=FALSE} library(PEcAn.all) logger.setQuitOnSevere(FALSE) diff --git a/models/biocro/vignettes/workflow.R b/models/biocro/vignettes/workflow.R deleted file mode 100644 index f47d8c812ee..00000000000 --- a/models/biocro/vignettes/workflow.R +++ /dev/null @@ -1,46 +0,0 @@ - -## @knitr , echo=FALSE,warning=FALSE -library(PEcAn.all) - - -## @knitr , echo=FALSE,warning=FALSE -settings <- read.settings(system.file("extdata/pecan.biocro.xml", package = "PEcAn.BIOCRO")) - -### limit scope for initial testing -settings$sensitivity.analysis$quantiles <- list(sigma = 0.5) -model <- settings$model$type - - - - -## @knitr , echo=FALSE,warning=FALSE,cache=TRUE - -# Query the trait database for data and priors -settings$pfts <- get.trait.data(settings$pfts, settings$model$type, settings$database$dbfiles, - settings$database$bety, settings$meta.analysis$update) - -# Run the PEcAn meta.analysis -run.meta.analysis(settings$pfts, settings$meta.analysis$iter, settings$meta.analysis$random.effects$on, - settings$meta.analysis$threshold, settings$database$dbfiles, settings$database$bety) - -## @knitr , echo=FALSE,warning=FALSE,cache=TRUE -run.write.configs(model) # Calls model specific write.configs e.g. write.config.ed.R -## load met data -PEcAn.workflow::start_model_runs(model) # Start ecosystem model runs -read.outputs(settings$model$type, settings) -# read.outputs(model, settings) #, variables = 'StemBiom') - -get.results(settings) # Get results of model runs - -# run.sensitivity.analysis() # Run sensitivity analysis and variance -# decomposition on model output - -# run.ensemble.analysis() # Run ensemble analysis on model output. 
OPTIONAL: -# run.ensemble.analysis(plot.timeseries=TRUE) to get an esemble time-series -# output for the target variables set in the PEcAn.xml file - -### PEcAn workflow run complete -print("---------- PEcAn Workflow Complete ----------") -#--------------------------------------------------------------------------------------------------# - - diff --git a/models/biocro/vignettes/workflow.Rmd b/models/biocro/vignettes/workflow.Rmd deleted file mode 100644 index c8e8f32b263..00000000000 --- a/models/biocro/vignettes/workflow.Rmd +++ /dev/null @@ -1,54 +0,0 @@ -BioCro PEcAn workflow -====================== - -```{r, echo=FALSE,warning=FALSE} -library(PEcAn.all) -``` -### Load PEcAn settings file. - -Open and read in settings file for PEcAn run. - - -```{r, echo=FALSE,warning=FALSE} -library(PEcAn.settings) -settings <- read.settings("~/dev/willow_da/vignettes/pecan.biocro.xml") -settings$sensitivity.analysis <- settings$ensemble -model <- settings$model$type - -``` -### Query database for trait data - -```{r, echo=FALSE,warning=FALSE,cache=TRUE} -library(PEcAn.DB) -settings$pfts <- get.trait.data(settings$pfts, settings$model$type, settings$run$dbfiles, settings$database$bety, settings$meta.analysis$update)# Query the trait database for data and priors -``` - -### Run Meta-analysis -```{r, echo=FALSE,warning=FALSE,cache=TRUE} -run.meta.analysis(settings$pfts, settings$meta.analysis$iter, settings$meta.analysis$random.effects$on, settings$meta.analysis$threshold, settings$run$dbfiles, settings$database$bety) -``` - - - -```{r, echo=FALSE,warning=FALSE,cache=TRUE} -run.write.configs(settings, settings$database$bety$write) -``` - -```{r, echo=FALSE,warning=FALSE,cache=TRUE} -## load met data -PEcAn.workflow::start_model_runs(settings, settings$database$bety$write) # Start ecosystem model runs -``` - -```{r, echo=FALSE,warning=FALSE,cache=TRUE} -convert.outputs(model = settings$model$name, settings = settings) - -run.sensitivity.analysis() # Run sensitivity analysis and variance decomposition on model output - -run.ensemble.analysis() # Run ensemble analysis on model output. 
- # OPTIONAL: run.ensemble.analysis(plot.timeseries=TRUE) to get an esemble - # time-series output for the target variables set in the PEcAn.xml file - -### PEcAn workflow run complete -print("---------- PEcAn Workflow Complete ----------") -#--------------------------------------------------------------------------------------------------# -``` diff --git a/models/cable/DESCRIPTION b/models/cable/DESCRIPTION index 621b64ba9ad..d7de137afa5 100644 --- a/models/cable/DESCRIPTION +++ b/models/cable/DESCRIPTION @@ -1,8 +1,7 @@ Package: PEcAn.CABLE Type: Package Title: PEcAn package for integration of the CABLE model -Version: 1.7.2 -Date: 2021-10-04 +Version: 1.7.3.9000 Authors@R: c(person("Kaitlin", "Ragosta", role = c("aut")), person("Tony", "Gardella", role = c("aut", "cre"), email = "tonygard@bu.edu"), @@ -22,4 +21,4 @@ Copyright: Authors LazyLoad: yes LazyData: FALSE Encoding: UTF-8 -RoxygenNote: 6.1.0 +RoxygenNote: 7.3.2 diff --git a/models/cable/man/write_restart.CABLE.Rd b/models/cable/man/write_restart.CABLE.Rd index 52945b79ee6..aad831a79c9 100644 --- a/models/cable/man/write_restart.CABLE.Rd +++ b/models/cable/man/write_restart.CABLE.Rd @@ -4,8 +4,7 @@ \alias{write_restart.CABLE} \title{Write restart template for SDA} \usage{ -write_restart.CABLE(outdir, runid, start.time, stop.time, settings, - new.state) +write_restart.CABLE(outdir, runid, start.time, stop.time, settings, new.state) } \arguments{ \item{start.time}{Time of current assimilation step} diff --git a/models/clm45/DESCRIPTION b/models/clm45/DESCRIPTION index 6828f45dc5b..363b065e63c 100644 --- a/models/clm45/DESCRIPTION +++ b/models/clm45/DESCRIPTION @@ -1,13 +1,10 @@ Package: PEcAn.CLM45 Type: Package Title: PEcAn Package for Integration of CLM4.5 Model -Version: 1.7.2 -Date: 2021-10-04 +Version: 1.7.3.9000 Authors@R: c(person("Mike", "Dietze", role = c("aut", "cre"), email = "dietze@bu.edu"), person("University of Illinois, NCSA", role = c("cph"))) -Author: Mike Dietze -Maintainer: Mike Dietze Description: The Predictive Ecosystem Carbon Analyzer (PEcAn) is a scientific workflow management tool that is designed to simplify the management of model parameterization, execution, and analysis. The goal of PECAn is to @@ -26,4 +23,4 @@ Copyright: Authors LazyLoad: yes LazyData: FALSE Encoding: UTF-8 -RoxygenNote: 7.2.3 +RoxygenNote: 7.3.2 diff --git a/models/clm45/LICENSE b/models/clm45/LICENSE index 9e38c2dc685..09ef35a60b4 100644 --- a/models/clm45/LICENSE +++ b/models/clm45/LICENSE @@ -1,29 +1,3 @@ -University of Illinois/NCSA Open Source License - -Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal with the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -- Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. -- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. 
-- Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - +YEAR: 2024 +COPYRIGHT HOLDER: PEcAn Project +ORGANIZATION: PEcAn Project, authors affiliations diff --git a/models/clm45/NEWS.md b/models/clm45/NEWS.md new file mode 100644 index 00000000000..72171423795 --- /dev/null +++ b/models/clm45/NEWS.md @@ -0,0 +1,10 @@ +# PEcAn.CLM45 1.7.3.9000 + +## License change +* PEcAn.CLM45 is now distributed under the BSD three-clause license instead of the NCSA Open Source license. + + +# PEcAn.CLM45 1.7.1 + +* All changes in 1.7.1 and earlier were recorded in a single file for all of the PEcAn packages; please see +https://github.com/PecanProject/pecan/blob/v1.7.1/CHANGELOG.md for details. diff --git a/models/clm45/R/met2model.CLM45.R b/models/clm45/R/met2model.CLM45.R index 60dda63ca0f..26aa11935a0 100644 --- a/models/clm45/R/met2model.CLM45.R +++ b/models/clm45/R/met2model.CLM45.R @@ -1,12 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2015 NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- -# ## R Code to convert NetCDF CF met files into NetCDF CLM met files. ##' met2model wrapper for CLM45 @@ -19,6 +10,8 @@ ##' @param start_date the start date of the data to be downloaded (will only use the year part of the date) ##' @param end_date the end date of the data to be downloaded (will only use the year part of the date) ##' @param lst timezone offset to GMT in hours +##' @param lat,lon site coordinates +##' @param ... other arguments, currently ignored ##' @param overwrite should existing files be overwritten ##' @param verbose should the function be very verbosefor(year in start_year:end_year) met2model.CLM45 <- function(in.path,in.prefix,outfolder,start_date, end_date, lst=0,lat,lon,..., overwrite=FALSE,verbose=FALSE){ diff --git a/models/clm45/R/model2netcdf.CLM45.R b/models/clm45/R/model2netcdf.CLM45.R index 6f028bf4768..ec98d75e13a 100644 --- a/models/clm45/R/model2netcdf.CLM45.R +++ b/models/clm45/R/model2netcdf.CLM45.R @@ -1,11 +1,3 @@ -## ------------------------------------------------------------------------------- -## Copyright (c) 2015 NCSA. -## All rights reserved. 
This program and the accompanying materials -## are made available under the terms of the -## University of Illinois/NCSA Open Source License -## which accompanies this distribution, and is available at -## http://opensource.ncsa.illinois.edu/license.html -## --------------------------------------------------------------------- ##' ##' @name model2netcdf.CLM45 ##' @title Code to convert CLM45 netcdf output into into CF standard diff --git a/models/clm45/R/version.R b/models/clm45/R/version.R new file mode 100644 index 00000000000..0e58d885272 --- /dev/null +++ b/models/clm45/R/version.R @@ -0,0 +1,3 @@ +# Set at package install time, used by pecan.all::pecan_version() +# to identify development versions of packages +.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown") diff --git a/models/clm45/R/write.configs.CLM45.R b/models/clm45/R/write.configs.CLM45.R index 205e733537e..b2ed9ea6ed6 100644 --- a/models/clm45/R/write.configs.CLM45.R +++ b/models/clm45/R/write.configs.CLM45.R @@ -1,18 +1,9 @@ -##------------------------------------------------------------------------------ -##Copyright (c) 2015 NCSA -##All rights reserved. This program and the accompanying materials -##are made available under the terms of the -##University of Illinois/NCSA Open Source License -##which accompanies this distribution, and is available at -##http://opensource.ncsa.illinois.edu/license.html -##------------------------------------------------------------------------------ -##-------------------------------------------------------------------------------------------------# ##' Writes config files for use with the Community Land Model model. ##' ##' @name write.config.CLM45 ##' @title Write CLM4.5 configuration files ##' @param defaults list of defaults to process -##' @param trait.samples vector of samples for a given trait +##' @param trait.values vector of samples for a given trait ##' @param settings list of settings from pecan settings file ##' @param run.id id of run ##' @return none diff --git a/models/clm45/man/met2model.CLM45.Rd b/models/clm45/man/met2model.CLM45.Rd index 16d89ea6bd7..c2a76033f27 100644 --- a/models/clm45/man/met2model.CLM45.Rd +++ b/models/clm45/man/met2model.CLM45.Rd @@ -31,6 +31,10 @@ met2model.CLM45( \item{lst}{timezone offset to GMT in hours} +\item{lat, lon}{site coordinates} + +\item{...}{other arguments, currently ignored} + \item{overwrite}{should existing files be overwritten} \item{verbose}{should the function be very verbosefor(year in start_year:end_year)} diff --git a/models/clm45/man/write.config.CLM45.Rd b/models/clm45/man/write.config.CLM45.Rd index 83ca34b8c32..2532a4ceca7 100644 --- a/models/clm45/man/write.config.CLM45.Rd +++ b/models/clm45/man/write.config.CLM45.Rd @@ -9,11 +9,11 @@ write.config.CLM45(defaults, trait.values, settings, run.id) \arguments{ \item{defaults}{list of defaults to process} +\item{trait.values}{vector of samples for a given trait} + \item{settings}{list of settings from pecan settings file} \item{run.id}{id of run} - -\item{trait.samples}{vector of samples for a given trait} } \value{ none diff --git a/models/clm45/tests/Rcheck_reference.log b/models/clm45/tests/Rcheck_reference.log index 3675c3c1761..bd8698df72d 100644 --- a/models/clm45/tests/Rcheck_reference.log +++ b/models/clm45/tests/Rcheck_reference.log @@ -61,11 +61,7 @@ The Date field is over a month old. * checking installed package size ... OK * checking package directory ... OK * checking for future file timestamps ... OK -* checking DESCRIPTION meta-information ... 
NOTE -Author field differs from that derived from Authors@R - Author: ‘Mike Dietze’ - Authors@R: ‘Mike Dietze [aut, cre], University of Illinois, NCSA [cph]’ - +* checking DESCRIPTION meta-information ... OK * checking top-level files ... OK * checking for left-over files ... OK * checking index information ... OK diff --git a/models/clm45/tests/testthat.R b/models/clm45/tests/testthat.R index 2582bc74de6..bd9de84301b 100644 --- a/models/clm45/tests/testthat.R +++ b/models/clm45/tests/testthat.R @@ -1,11 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- library(testthat) library(PEcAn.utils) diff --git a/models/dalec/DESCRIPTION b/models/dalec/DESCRIPTION index 289f2c7d040..a73bd635c4d 100644 --- a/models/dalec/DESCRIPTION +++ b/models/dalec/DESCRIPTION @@ -1,12 +1,12 @@ Package: PEcAn.DALEC Type: Package Title: PEcAn Package for Integration of the DALEC Model -Version: 1.7.2 -Date: 2021-10-04 +Version: 1.7.3.9000 Authors@R: c(person("Mike", "Dietze", role = c("aut", "cre"), email = "dietze@bu.edu"), person("Tristan", "Quaife", role = c("aut")), - person("University of Illinois, NCSA", role = c("cph"))) + person("University of Illinois, NCSA", role = c("cph")), + person("Boston University", role = c("cph"))) Author: Mike Dietze, Tristain Quaife Maintainer: Mike Dietze Description: This module provides functions to link DALEC to PEcAn. @@ -25,4 +25,4 @@ Copyright: Authors LazyLoad: yes LazyData: FALSE Encoding: UTF-8 -RoxygenNote: 7.2.3 +RoxygenNote: 7.3.2 diff --git a/models/dalec/LICENSE b/models/dalec/LICENSE index 5a9e44128f1..09ef35a60b4 100644 --- a/models/dalec/LICENSE +++ b/models/dalec/LICENSE @@ -1,34 +1,3 @@ -## This is the master copy of the PEcAn License - -University of Illinois/NCSA Open Source License - -Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - -PEcAn project -www.pecanproject.org - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal with the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -- Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. -- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. -- Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - +YEAR: 2024 +COPYRIGHT HOLDER: PEcAn Project +ORGANIZATION: PEcAn Project, authors affiliations diff --git a/models/dalec/NEWS.md b/models/dalec/NEWS.md new file mode 100644 index 00000000000..cd70e8ae222 --- /dev/null +++ b/models/dalec/NEWS.md @@ -0,0 +1,10 @@ +# PEcAn.DALEC 1.7.3.9000 + +## License change +* PEcAn.DALEC is now distributed under the BSD three-clause license instead of the NCSA Open Source license. + + +# PEcAn.DALEC 1.7.1 + +* All changes in 1.7.1 and earlier were recorded in a single file for all of the PEcAn packages; please see +https://github.com/PecanProject/pecan/blob/v1.7.1/CHANGELOG.md for details. diff --git a/models/dalec/R/met2model.DALEC.R b/models/dalec/R/met2model.DALEC.R index 242bfe61110..0ce459df567 100644 --- a/models/dalec/R/met2model.DALEC.R +++ b/models/dalec/R/met2model.DALEC.R @@ -1,12 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2015 Boston University, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - # R Code to convert NetCDF CF met files into DALEC met files ## If files already exist in 'Outfolder', the default function is NOT to overwrite them and only @@ -24,6 +15,9 @@ ##' @param end_date the end date of the data to be downloaded (will only use the year part of the date) ##' @param overwrite should existing files be overwritten ##' @param verbose should the function be very verbose +##' @param spin_nyear,spin_nsample,spin_resample passed on to +##' `PEcAn.data.atmosphere::spin.met()` +##' @param ... additional arguments, currently ignored met2model.DALEC <- function(in.path, in.prefix, outfolder, start_date, end_date, overwrite = FALSE, verbose = FALSE, spin_nyear=NULL,spin_nsample=NULL,spin_resample=NULL, ...) { diff --git a/models/dalec/R/model2netcdf.DALEC.R b/models/dalec/R/model2netcdf.DALEC.R index 71e7b9a9ef0..18be5fcb01f 100644 --- a/models/dalec/R/model2netcdf.DALEC.R +++ b/models/dalec/R/model2netcdf.DALEC.R @@ -1,13 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2015 Boston University, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -#--------------------------------------------------------------------------------------------------# ##' Convert DALEC output to netCDF ##' ##' Converts all output contained in a folder to netCDF. 
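As an aside on the `spin_nyear`/`spin_nsample`/`spin_resample` parameters documented in the met2model.DALEC hunk above: a minimal sketch of a call that requests a spin-up. Only the argument names are taken from the signature in this patch; paths, prefix, dates, and the meanings noted for the `spin_*` values are assumptions, and the actual semantics live in `PEcAn.data.atmosphere::spin.met()`.

```r
# Sketch (not part of the patch): convert one year of CF met to DALEC
# format with a spin-up. Paths, prefix, and dates are placeholders.
library(PEcAn.DALEC)

met2model.DALEC(
  in.path       = "/data/met/cf",     # hypothetical CF met directory
  in.prefix     = "US-Dk3",           # hypothetical file prefix
  outfolder     = "/data/met/dalec",  # hypothetical output directory
  start_date    = "2004-01-01",
  end_date      = "2004-12-31",
  spin_nyear    = 10,                 # spin-up length in years (assumed meaning)
  spin_nsample  = 50,                 # met years to resample from (assumed meaning)
  spin_resample = TRUE                # forwarded to spin.met()
)
```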
diff --git a/models/dalec/R/version.R b/models/dalec/R/version.R new file mode 100644 index 00000000000..0e58d885272 --- /dev/null +++ b/models/dalec/R/version.R @@ -0,0 +1,3 @@ +# Set at package install time, used by pecan.all::pecan_version() +# to identify development versions of packages +.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown") diff --git a/models/dalec/R/write.configs.dalec.R b/models/dalec/R/write.configs.dalec.R index 21e481c242a..64859b1c1fe 100644 --- a/models/dalec/R/write.configs.dalec.R +++ b/models/dalec/R/write.configs.dalec.R @@ -1,12 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2015 Boston University, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - #--------------------------------------------------------------------------------------------------# # Template for functions to prepare and write out files model-specific configuration files for MA #--------------------------------------------------------------------------------------------------# @@ -81,10 +72,11 @@ convert.samples.DALEC <- function(trait.samples) { ##' write Dalec Configuration files ##' ##' @title write.config.DALEC -##' @param defaults -##' @param trait.values -##' @param settings -##' @param run.id +##' @param defaults ignored +##' @param trait.values vector of samples for a given trait +##' @param settings a PEcAn settings object +##' @param run.id Unique identifier for the run, +##' used here to construct output directories and filenames. ##' @return configuration files ##' @export write.config.DALEC write.config.DALEC <- function(defaults, trait.values, settings, run.id) { diff --git a/models/dalec/man/met2model.DALEC.Rd b/models/dalec/man/met2model.DALEC.Rd index 66ef48f260a..f6fff625826 100644 --- a/models/dalec/man/met2model.DALEC.Rd +++ b/models/dalec/man/met2model.DALEC.Rd @@ -32,6 +32,11 @@ met2model.DALEC( \item{overwrite}{should existing files be overwritten} \item{verbose}{should the function be very verbose} + +\item{spin_nyear, spin_nsample, spin_resample}{passed on to +`PEcAn.data.atmosphere::spin.met()`} + +\item{...}{additional arguments, currently ignored} } \description{ met2model for DALEC diff --git a/models/dalec/man/write.config.DALEC.Rd b/models/dalec/man/write.config.DALEC.Rd index 4d0329829db..559b3ca3cd6 100644 --- a/models/dalec/man/write.config.DALEC.Rd +++ b/models/dalec/man/write.config.DALEC.Rd @@ -6,6 +6,16 @@ \usage{ write.config.DALEC(defaults, trait.values, settings, run.id) } +\arguments{ +\item{defaults}{ignored} + +\item{trait.values}{vector of samples for a given trait} + +\item{settings}{a PEcAn settings object} + +\item{run.id}{Unique identifier for the run, +used here to construct output directories and filenames.} +} \value{ configuration files } diff --git a/models/dalec/tests/Rcheck_reference.log b/models/dalec/tests/Rcheck_reference.log index a4ac261c1d4..97412f1c129 100644 --- a/models/dalec/tests/Rcheck_reference.log +++ b/models/dalec/tests/Rcheck_reference.log @@ -66,11 +66,7 @@ The Date field is over a month old. * checking installed package size ... OK * checking package directory ... OK * checking for future file timestamps ... OK -* checking DESCRIPTION meta-information ... 
NOTE -Author field differs from that derived from Authors@R - Author: ‘Mike Dietze, Tristain Quaife’ - Authors@R: ‘Mike Dietze [aut, cre], Tristan Quaife [aut], University of Illinois, NCSA [cph]’ - +* checking DESCRIPTION meta-information ... OK * checking top-level files ... OK * checking for left-over files ... OK * checking index information ... OK diff --git a/models/dvmdostem/DESCRIPTION b/models/dvmdostem/DESCRIPTION index 07f2674be99..a22b0daa62e 100644 --- a/models/dvmdostem/DESCRIPTION +++ b/models/dvmdostem/DESCRIPTION @@ -1,8 +1,7 @@ Package: PEcAn.dvmdostem Type: Package Title: PEcAn Package for Integration of the Dvmdostem Model -Version: 1.7.2 -Date: 2021-10-04 +Version: 1.7.3.9000 Authors@R: c(person("Shawn", "Serbin", role = c("aut"), email = "sserbin@bnl.gov"), person("Tobey", "Carman", role = c("aut", "cre"), @@ -27,4 +26,4 @@ Copyright: Authors LazyLoad: yes LazyData: FALSE Encoding: UTF-8 -RoxygenNote: 7.2.3 +RoxygenNote: 7.3.2 diff --git a/models/dvmdostem/LICENSE b/models/dvmdostem/LICENSE index 5a9e44128f1..09ef35a60b4 100644 --- a/models/dvmdostem/LICENSE +++ b/models/dvmdostem/LICENSE @@ -1,34 +1,3 @@ -## This is the master copy of the PEcAn License - -University of Illinois/NCSA Open Source License - -Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - -PEcAn project -www.pecanproject.org - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal with the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -- Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. -- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. -- Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - +YEAR: 2024 +COPYRIGHT HOLDER: PEcAn Project +ORGANIZATION: PEcAn Project, authors affiliations diff --git a/models/dvmdostem/NEWS.md b/models/dvmdostem/NEWS.md new file mode 100644 index 00000000000..c66a28e0593 --- /dev/null +++ b/models/dvmdostem/NEWS.md @@ -0,0 +1,7 @@ +# PEcAn.dvmdostem 1.7.3.9000 + +## License change +* PEcAn.dvmdostem is now distributed under the BSD three-clause license instead of the NCSA Open Source license. + +## Added +* Added a `NEWS.md` file to track changes to the package. Prior to this point changes are tracked in the main CHANGELOG for the PEcAn repository. 
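The three-line `R/version.R` file added for PEcAn.DALEC above (and repeated verbatim for the other model packages in this patch) records the git revision available at install time. A quick sketch of the mechanism; the `"abc1234"` value is a placeholder:

```r
# The build system is expected to export PECAN_GIT_REV before R CMD INSTALL;
# when the variable is unset the hash falls back to "unknown", which is how
# released (non-development) installs appear to pecan.all::pecan_version().
Sys.setenv(PECAN_GIT_REV = "abc1234")  # placeholder revision
.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown")
.build_hash
#> [1] "abc1234"
```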
diff --git a/models/dvmdostem/R/model2netcdf.dvmdostem.R b/models/dvmdostem/R/model2netcdf.dvmdostem.R index a14abf0fde5..2bb0517e69f 100644 --- a/models/dvmdostem/R/model2netcdf.dvmdostem.R +++ b/models/dvmdostem/R/model2netcdf.dvmdostem.R @@ -1,11 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2016 NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- library(lubridate) ##-------------------------------------------------------------------------------------------------# diff --git a/models/dvmdostem/R/version.R b/models/dvmdostem/R/version.R new file mode 100644 index 00000000000..0e58d885272 --- /dev/null +++ b/models/dvmdostem/R/version.R @@ -0,0 +1,3 @@ +# Set at package install time, used by pecan.all::pecan_version() +# to identify development versions of packages +.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown") diff --git a/models/dvmdostem/R/write.config.dvmdostem.R b/models/dvmdostem/R/write.config.dvmdostem.R index 5293990bdab..9e0298b2040 100644 --- a/models/dvmdostem/R/write.config.dvmdostem.R +++ b/models/dvmdostem/R/write.config.dvmdostem.R @@ -1,12 +1,4 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- -##------------------------------------------------------------------------------------------------# + ##' Setup the output variables that dvmdostem will generate and PEcAn will analyze. ##' This function handles the interplay between output variables and output spec file. 
##' There are custom tags in the section of the pecan xml file for dvmdostem
@@ -199,20 +191,25 @@ requested_vars_string2list <- function(req_v_str, outspec_path) {
  return(req_v_list)
}
-##------------------------------------------------------------------------------------------------#
-##' convert parameters, do unit conversions and update parameter names from PEcAn database default
-##' to units/names within dvmdostem
-##'
-##' Performs model specific unit conversions on a a list of trait values,
-##' such as those provided to write.config
-##'
-##' @name convert.samples.dvmdostem
-##' @title Convert samples for dvmdostem
-##' @param trait_samples a matrix or dataframe of samples from the trait distribution
-##' @return matrix or dataframe with values transformed
-##' @export
-##' @author Shawn Serbin, Tobey Carman
-##'
+
+
+
+
+
+
+#' Convert samples for dvmdostem
+#'
+#' Convert parameters, do unit conversions and update parameter names from PEcAn database default
+#' to units/names within dvmdostem
+#'
+#' Performs model specific unit conversions on a list of trait values,
+#' such as those provided to write.config
+#'
+#' @param trait_values a matrix or dataframe of samples from the trait distribution
+#' @return matrix or dataframe with values transformed
+#' @export
+#' @author Shawn Serbin, Tobey Carman
+#'
convert.samples.dvmdostem <- function(trait_values) {
  if("SLA" %in% names(trait_values)) {
diff --git a/models/dvmdostem/man/convert.samples.dvmdostem.Rd b/models/dvmdostem/man/convert.samples.dvmdostem.Rd
index ef1b77e7819..8e8bdeaa29e 100644
--- a/models/dvmdostem/man/convert.samples.dvmdostem.Rd
+++ b/models/dvmdostem/man/convert.samples.dvmdostem.Rd
@@ -7,7 +7,7 @@
convert.samples.dvmdostem(trait_values)
}
\arguments{
-\item{trait_samples}{a matrix or dataframe of samples from the trait distribution}
+\item{trait_values}{a matrix or dataframe of samples from the trait distribution}
}
\value{
matrix or dataframe with values transformed
diff --git a/models/dvmdostem/tests/testthat.R b/models/dvmdostem/tests/testthat.R
index 1103131e9cb..fe047cf93c0 100644
--- a/models/dvmdostem/tests/testthat.R
+++ b/models/dvmdostem/tests/testthat.R
@@ -1,11 +1,3 @@
-#-------------------------------------------------------------------------------
-# Copyright (c) 2012 University of Illinois, NCSA.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the
-# University of Illinois/NCSA Open Source License
-# which accompanies this distribution, and is available at
-# http://opensource.ncsa.illinois.edu/license.html
-#-------------------------------------------------------------------------------
library(testthat)
library(PEcAn.utils)
diff --git a/models/ed/DESCRIPTION b/models/ed/DESCRIPTION
index 00aa6941bcc..308781f1a39 100644
--- a/models/ed/DESCRIPTION
+++ b/models/ed/DESCRIPTION
@@ -1,8 +1,7 @@
Package: PEcAn.ED2
Type: Package
Title: PEcAn Package for Integration of ED2 Model
-Version: 1.7.2.9000
-Date: 2021-10-04
+Version: 1.8.0.9000
Authors@R: c(person("Mike", "Dietze", role = c("aut", "cre"),
                  email = "dietze@bu.edu"),
            person("David", "LeBauer", role = c("aut"),
@@ -32,7 +31,7 @@ Description: The Predictive Ecosystem Carbon Analyzer (PEcAn) is a scientific
    efficacy of scientific investigation. This package provides functions to
    link the Ecosystem Demography Model, version 2, to PEcAn.
Depends: - R (>= 2.10) + R (>= 3.5) Imports: abind (>= 1.4.5), assertthat, @@ -58,13 +57,15 @@ Imports: Suggests: testthat (>= 1.0.2), devtools, + knitr, + rmarkdown, withr Additional_repositories: https://pecanproject.r-universe.dev/ License: BSD_3_clause + file LICENSE Copyright: Authors -LazyLoad: yes +VignetteBuilder: knitr, rmarkdown LazyData: true Encoding: UTF-8 -RoxygenNote: 7.2.3 +RoxygenNote: 7.3.2 Roxygen: list(markdown = TRUE) Config/testthat/edition: 2 diff --git a/models/ed/Dockerfile b/models/ed/Dockerfile index 282a5b5d391..a3873e896a7 100644 --- a/models/ed/Dockerfile +++ b/models/ed/Dockerfile @@ -4,15 +4,12 @@ ARG IMAGE_VERSION="latest" # ---------------------------------------------------------------------- # BUILD MODEL BINARY # ---------------------------------------------------------------------- -FROM pecan/models:${IMAGE_VERSION} as model-binary +FROM pecan/models:${IMAGE_VERSION} AS model-binary # Some variables that can be used to set control the docker build ARG MODEL_VERSION="2.2.0" ARG BINARY_VERSION="2.2" -# specify fortran compiler -ENV FC_TYPE=GNU - # install dependencies RUN apt-get update \ && apt-get install -y --no-install-recommends \ @@ -20,17 +17,16 @@ RUN apt-get update \ curl \ gfortran \ git \ - libhdf5-dev \ - libopenmpi-dev \ + libhdf5-openmpi-dev \ && rm -rf /var/lib/apt/lists/* # download, unzip and build ed2 WORKDIR /src -RUN git -c http.sslVerify=false clone https://github.com/EDmodel/ED2.git \ - && cd ED2/ED/build \ - && curl -o make/include.mk.VM http://isda.ncsa.illinois.edu/~kooper/EBI/include.mk.opt.`uname -s` \ +RUN git -c http.sslVerify=false clone https://github.com/EDmodel/ED2.git +COPY data-raw/include.mk.opt /src/ED2/ED/build/make/include.mk.pecan +RUN cd ED2/ED/build \ && if [ "${MODEL_VERSION}" != "git" ]; then git checkout "v.${MODEL_VERSION}"; fi \ - && ./install.sh -g -p VM \ + && ./install.sh -g -p pecan \ && mv /src/ED2/ED/build/ed_${BINARY_VERSION}-opt /src/ED2/ED/build/ed ######################################################################## @@ -46,7 +42,8 @@ FROM pecan/models:${IMAGE_VERSION} RUN apt-get update \ && apt-get install -y --no-install-recommends \ - libopenmpi3 \ + libhdf5-openmpi-103 \ + libgomp1 \ && rm -rf /var/lib/apt/lists/* # INSTALL PEcAn.ED2 diff --git a/models/ed/NEWS.md b/models/ed/NEWS.md index 25fe06d0323..9bcdf6f5925 100644 --- a/models/ed/NEWS.md +++ b/models/ed/NEWS.md @@ -1,4 +1,7 @@ -# PEcAn.ED2 (development version) +# PEcAn.ED2 1.8.0.9000 + + +# PEcAn.ED2 1.8.0 * Warning messages for `model2netcdf.ed2()` coming from `ncdf4::ncvar_put()` now are prepended with the variable name for easier debugging (#3078) * Fixed a bug in `model2netcdf.ed2()` where .nc file connections were being closed multiple times, printing warnings (#3078) @@ -9,10 +12,6 @@ * Fixed a bug in `read_E_files()` affecting `model2netcdf.ED2()` that resulted in incorrect calculations (#3126) * DDBH (change in DBH over time) is no longer extracted and summarized from monthly -E- files by `model2netcdf.ED2()`. We are not sure it makes sense to summarize this variable across cohorts of different sizes. * The `yr` and `yfiles` arguments of `read_E_files()` are no longer used and the simulation date is extracted from the names of the .h5 files output by ED2. 
-* Fixed a bug where dimensions of output .nc file would be incorrect if a PFT was missing from ED2 output for less than a full year
-
-
-# PEcAn.ED2 1.7.2.9000
-
+* Fixed a bug where dimensions of output .nc file would be incorrect if a PFT was missing from ED2 output for less than a full year (#3140, #3143).
+* Added optional `process_partial` argument to `model2netcdf.ED2()` to allow it to process existing output from failed runs.
* Added a `NEWS.md` file to track changes to the package. Prior to this point changes are tracked in the main CHANGELOG for the PEcAn repository.
-* Added optional `process_partial` argument to `model2netcdf.ED2()` to allow it to process existing output from failed
\ No newline at end of file
diff --git a/models/ed/R/check_veg.R b/models/ed/R/check_veg.R
index d8b4b53f12b..b69c1be2cbb 100644
--- a/models/ed/R/check_veg.R
+++ b/models/ed/R/check_veg.R
@@ -9,12 +9,13 @@
#' @return `NULL` (invisibly)
#' @export
check_css <- function(css, pss = NULL) {
-  if(!inherits(css, "data.frame") | nrow(css) == 0) {
+  if(!inherits(css, "data.frame") || nrow(css) == 0) {
    stop("css file should be a data frame")
  }
-  if(colnames(css) != c("time", "patch", "cohort", "dbh", "hite", "pft",
-                        "n", "bdead", "balive", "lai")) {
+  expected_colnames <- c("time", "patch", "cohort", "dbh", "hite", "pft",
+                         "n", "bdead", "balive", "lai")
+  if (!identical(colnames(css), expected_colnames)) {
    stop("css file is formatted incorrectly")
  }
@@ -29,7 +30,7 @@ check_css <- function(css, pss = NULL) {
#' @rdname check_css
#' @export
check_pss <- function(pss, site = NULL) {
-  if(!inherits(pss, "data.frame") | nrow(pss) == 0) {
+  if (!inherits(pss, "data.frame") || nrow(pss) == 0) {
    stop("css file should be a data frame")
  }
  if (!is.null(site)) {
@@ -43,8 +44,9 @@ check_pss <- function(pss, site = NULL) {
#' @rdname check_css
#' @export
check_site <- function(site) {
-  stopifnot(nrow(site) >= 1)
-  stopifnot(!is.null(attributes(site)))
-  stopifnot(attr(site, "nsite") == "numeric")
-  stopifnot(attr(site, "file_format") %in% c(1, 2, 3))
+  stopifnot(
+    nrow(site) >= 1,
+    !is.null(attributes(site)),
+    is.numeric(attr(site, "nsite")),
+    attr(site, "file_format") %in% c(1, 2, 3))
}
diff --git a/models/ed/R/modify_ed2in.R b/models/ed/R/modify_ed2in.R
index 3cd6a4549f7..9cc843659db 100644
--- a/models/ed/R/modify_ed2in.R
+++ b/models/ed/R/modify_ed2in.R
@@ -30,7 +30,7 @@
#' - "restart" -- Restart file for HISTORY runs. (`ISOUTPUT`)
#' - "all" -- All output types
#'
-#' @inheritParams read_ed2in
+#' @param ed2in list to modify
#' @param ... Namelist arguments (see Description and Details)
#' @param veg_prefix Vegetation file prefix (`SFILIN`). If `lat` and `lon` are part of the prefix,
#' @param latitude Run latitude coordinate.
If `veg_prefix` is also provided, diff --git a/models/ed/R/other.helpers.ED2.R b/models/ed/R/other.helpers.ED2.R index 652c4eb8edc..ea36b124d17 100644 --- a/models/ed/R/other.helpers.ED2.R +++ b/models/ed/R/other.helpers.ED2.R @@ -1,4 +1,4 @@ -#' @title List only files in a directory +#' List only files in a directory #' #' Mostly useful when `recursive` and `full.names` are both FALSE: #' The current implementation sets `full.names` internally, and for recursive diff --git a/models/ed/R/read_ed_metheader.R b/models/ed/R/read_ed_metheader.R index bcc285a2d3d..640a531cde1 100644 --- a/models/ed/R/read_ed_metheader.R +++ b/models/ed/R/read_ed_metheader.R @@ -25,7 +25,7 @@ #' - `flag_description` -- Description of variable flag #' #' The formatting of a meteorology header file is as follows (from the [ED -#' GitHub Wiki][https://github.com/EDmodel/ED2/wiki/Drivers]): +#' GitHub Wiki](https://github.com/EDmodel/ED2/wiki/Drivers)): #' #' ``` #' # Repeat lines below this number of times diff --git a/models/ed/R/version.R b/models/ed/R/version.R new file mode 100644 index 00000000000..0e58d885272 --- /dev/null +++ b/models/ed/R/version.R @@ -0,0 +1,3 @@ +# Set at package install time, used by pecan.all::pecan_version() +# to identify development versions of packages +.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown") diff --git a/models/ed/R/write.configs.ed.R b/models/ed/R/write.configs.ed.R index f4446a29e92..a0da36fb94d 100644 --- a/models/ed/R/write.configs.ed.R +++ b/models/ed/R/write.configs.ed.R @@ -399,13 +399,13 @@ write.config.ED2 <- function(trait.values, settings, run.id, defaults = settings # ==================================================================================================# ##-------------------------------------------------------------------------------------------------# -##' Clear out old config and ED model run files. -##' -##' @name remove.config.ED2 -##' @title Clear out old config and ED model run files. -##' @return nothing, removes config files as side effect -##' @export -##' @author Shawn Serbin, David LeBauer, Alexey Shikomanov +#' Clear out old config and ED model run files. +#' +#' @param main.outdir ignored +#' @param settings PEcAn settings object +#' @return nothing, removes config files as side effect +#' @export +#' @author Shawn Serbin, David LeBauer, Alexey Shikomanov remove.config.ED2 <- function(main.outdir = settings$outdir, settings) { print(" ") diff --git a/models/ed/data-raw/include.mk.opt b/models/ed/data-raw/include.mk.opt new file mode 100644 index 00000000000..da68d2f1455 --- /dev/null +++ b/models/ed/data-raw/include.mk.opt @@ -0,0 +1,38 @@ +#Makefile include include.mk.opt.ubuntu +############################################################################ + +# Define make (gnu make works best). +MAKE=/usr/bin/make + +# libraries. 
+BASE=$(ED_ROOT)/build/
+
+# HDF 5 Libraries
+HDF5_INCS=-I/usr/include/hdf5/openmpi
+HDF5_LIBS= -L/usr/lib/$(shell uname -m)-linux-gnu/hdf5/openmpi -lhdf5_fortran -lhdf5_hl -lhdf5 -lz -lm
+USE_COLLECTIVE_MPIO=0
+
+# interface
+USE_INTERF=1
+
+# gfortran
+CMACH=PC_LINUX1
+FC_TYPE=GNU
+F_COMP=mpif90.openmpi
+F_OPTS=-O3 -ffree-line-length-none -frecursive -fopenmp -static
+C_COMP=mpicc.openmpi
+C_OPTS=-O0 -DLITTLE -g -static
+LOADER=mpif90.openmpi
+LOADER_OPTS=-O3 -ffree-line-length-none -frecursive -fopenmp
+C_LOADER=mpicc.openmpi
+LIBS=
+MOD_EXT=mod
+
+# using MPI libraries:
+MPI_PATH=
+PAR_INCS=
+PAR_LIBS=
+PAR_DEFS=
+
+# For IBM,HP,SGI,ALPHA,LINUX use these:
+ARCHIVE=ar rs
diff --git a/models/ed/man/list.files.nodir.Rd b/models/ed/man/list.files.nodir.Rd
index ab82a5b34e1..a86032026b1 100644
--- a/models/ed/man/list.files.nodir.Rd
+++ b/models/ed/man/list.files.nodir.Rd
@@ -2,11 +2,7 @@
% Please edit documentation in R/other.helpers.ED2.R
\name{list.files.nodir}
\alias{list.files.nodir}
-\title{List only files in a directory
-
-Mostly useful when \code{recursive} and \code{full.names} are both FALSE:
-The current implementation sets \code{full.names} internally, and for recursive
-listings \code{list.files(..., include.dirs = FALSE)} is equivalent and faster.}
+\title{List only files in a directory}
\usage{
list.files.nodir(path, ...)
}
@@ -16,8 +12,6 @@ list.files.nodir(path, ...)
\item{...}{arguments passed on to base::list.files}
}
\description{
-List only files in a directory
-
Mostly useful when \code{recursive} and \code{full.names} are both FALSE:
The current implementation sets \code{full.names} internally, and for recursive
listings \code{list.files(..., include.dirs = FALSE)} is equivalent and faster.
diff --git a/models/ed/man/modify_ed2in.Rd b/models/ed/man/modify_ed2in.Rd
index 26b8d093ca4..ce341cea301 100644
--- a/models/ed/man/modify_ed2in.Rd
+++ b/models/ed/man/modify_ed2in.Rd
@@ -27,6 +27,8 @@ modify_ed2in(
)
}
\arguments{
+\item{ed2in}{list to modify}
+
\item{...}{Namelist arguments (see Description and Details)}
\item{veg_prefix}{Vegetation file prefix (\code{SFILIN}). If \code{lat} and \code{lon} are part of the prefix,}
diff --git a/models/ed/man/read_ed_metheader.Rd b/models/ed/man/read_ed_metheader.Rd
index c35941ddc0a..04c58f16eb8 100644
--- a/models/ed/man/read_ed_metheader.Rd
+++ b/models/ed/man/read_ed_metheader.Rd
@@ -48,7 +48,7 @@ Starred columns are required for writing. This table is left joined with
}
}
-The formatting of a meteorology header file is as follows (from the \link[=https://github.com/EDmodel/ED2/wiki/Drivers]{ED GitHub Wiki}):
+The formatting of a meteorology header file is as follows (from the \href{https://github.com/EDmodel/ED2/wiki/Drivers}{ED GitHub Wiki}):
\if{html}{\out{
}}\preformatted{ # Repeat lines below this number of times diff --git a/models/ed/man/remove.config.ED2.Rd b/models/ed/man/remove.config.ED2.Rd index 10c1e6cd7dc..40b8e992899 100644 --- a/models/ed/man/remove.config.ED2.Rd +++ b/models/ed/man/remove.config.ED2.Rd @@ -6,6 +6,11 @@ \usage{ remove.config.ED2(main.outdir = settings$outdir, settings) } +\arguments{ +\item{main.outdir}{ignored} + +\item{settings}{PEcAn settings object} +} \value{ nothing, removes config files as side effect } diff --git a/models/ed/tests/Rcheck_reference.log b/models/ed/tests/Rcheck_reference.log index d478d30d2ae..0b419885b14 100644 --- a/models/ed/tests/Rcheck_reference.log +++ b/models/ed/tests/Rcheck_reference.log @@ -23,7 +23,8 @@ use conditionally. * checking installed package size ... OK * checking package directory ... OK * checking for future file timestamps ... OK -* checking DESCRIPTION meta-information ... OK +* checking DESCRIPTION meta-information ... NOTE +License stub is invalid DCF. * checking top-level files ... OK * checking for left-over files ... OK * checking index information ... OK @@ -46,25 +47,10 @@ File ‘PEcAn.ED2/R/model2netcdf.ED2.R’: * checking Rd files ... OK * checking Rd metadata ... OK * checking Rd line widths ... OK -* checking Rd cross-references ... WARNING -Missing link or links in documentation object 'read_ed_metheader.Rd': - ‘https://github.com/EDmodel/ED2/wiki/Drivers’ - -See section 'Cross-references' in the 'Writing R Extensions' manual. +* checking Rd cross-references ... OK * checking for missing documentation entries ... OK * checking for code/documentation mismatches ... OK -* checking Rd \usage sections ... WARNING -Undocumented arguments in documentation object 'modify_ed2in' - ‘ed2in’ - -Undocumented arguments in documentation object 'remove.config.ED2' - ‘main.outdir’ ‘settings’ - -Functions with \usage entries need to have the appropriate \alias -entries, and all their arguments documented. -The \usage entries must correspond to syntactically valid R code. -See chapter ‘Writing R documentation files’ in the ‘Writing R -Extensions’ manual. +* checking Rd \usage sections ... OK * checking Rd contents ... OK * checking for unstated dependencies in examples ... OK * checking contents of ‘data’ directory ... OK @@ -72,10 +58,7 @@ Extensions’ manual. * checking LazyData ... OK * checking data for ASCII and uncompressed saves ... OK * checking R/sysdata.rda ... OK -* checking files in ‘vignettes’ ... WARNING -Files in the 'vignettes' directory but no files in 'inst/doc': - ‘running_ed_from_R.Rmd’ -Package has no Sweave vignette sources and no VignetteBuilder field. +* checking files in ‘vignettes’ ... OK * checking examples ... OK * checking for unstated dependencies in ‘tests’ ... OK * checking tests ... 
diff --git a/models/ed/vignettes/running_ed_from_R.Rmd b/models/ed/vignettes/running_ed_from_R.Rmd
index 4e590f035f4..39393384bdc 100644
--- a/models/ed/vignettes/running_ed_from_R.Rmd
+++ b/models/ed/vignettes/running_ed_from_R.Rmd
@@ -1,6 +1,10 @@
---
title: "Running ED from R"
author: "Alexey Shiklomanov"
+output: rmarkdown::html_vignette
+vignette: |
+  %\VignetteIndexEntry{Running ED from R}
+  %\VignetteEngine{knitr::rmarkdown}
---
# Introduction
@@ -17,13 +21,15 @@ This tutorial describes these utilites and provides examples of common use cases
The `PEcAn.ED2` package and its PEcAn dependencies can be installed from GitHub as follows (all CRAN package dependencies should be installed automatically):
-```{r install, eval = -(1:5)}
+```{r install, eval = FALSE}
devtools::install_github("pecanproject/pecan", ref = "develop", subdir = "base/logger")
devtools::install_github("pecanproject/pecan", ref = "develop", subdir = "base/utils")
devtools::install_github("pecanproject/pecan", ref = "develop", subdir = "base/settings")
devtools::install_github("pecanproject/pecan", ref = "develop", subdir = "modules/data.atmosphere")
devtools::install_github("pecanproject/pecan", ref = "develop", subdir = "models/ed")
+```
+
+```{r library}
library(PEcAn.ED2)
```
@@ -46,8 +52,7 @@ These can be customized to some extent, but this package provides a version of t
These inputs, stored in an "EDI" directory, can be downloaded via the `download_edi` function:
```{r get_edi}
-library(here)
-rundir <- here("vignettes", "ed_run_data")
+rundir <- file.path(tempdir(), "ed_run_data")
dir.create(rundir, showWarnings = FALSE)
edi_dir <- file.path(rundir, "EDI")
@@ -164,6 +169,7 @@
ed2in <- modify_ed2in(
  run_dir = file.path(rundir, "run"),
  output_dir = file.path(rundir, "out"),
  runtype = "INITIAL",
+  pecan_defaults = TRUE,
  EXPNME = "ED test run"
)
```
diff --git a/models/fates/DESCRIPTION b/models/fates/DESCRIPTION
index baa8dc55b88..a44c6011931 100644
--- a/models/fates/DESCRIPTION
+++ b/models/fates/DESCRIPTION
@@ -1,8 +1,7 @@
Package: PEcAn.FATES
Type: Package
Title: PEcAn Package for Integration of FATES Model
-Version: 1.7.2
-Date: 2021-10-04
+Version: 1.7.3.9000
Authors@R: c(person("Mike", "Dietze", role = c("aut", "cre"),
                  email = "dietze@bu.edu"),
            person("Shawn", "Serbin", role = c("aut"),
@@ -22,7 +21,8 @@
Imports:
    PEcAn.remote,
    PEcAn.utils,
    lubridate (>= 1.6.0),
-    ncdf4 (>= 1.15)
+    ncdf4 (>= 1.15),
+    tibble
Suggests:
    testthat (>= 1.0.2)
License: BSD_3_clause + file LICENSE
@@ -30,4 +30,4 @@
Copyright: Authors
LazyLoad: yes
LazyData: FALSE
Encoding: UTF-8
-RoxygenNote: 7.2.3
+RoxygenNote: 7.3.2
diff --git a/models/fates/LICENSE b/models/fates/LICENSE
index 9e38c2dc685..09ef35a60b4 100644
--- a/models/fates/LICENSE
+++ b/models/fates/LICENSE
@@ -1,29 +1,3 @@
-University of Illinois/NCSA Open Source License
-
-Copyright (c) 2012, University of Illinois, NCSA. All rights reserved.
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal with the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-- Redistributions of source code must retain the above copyright
-  notice, this list of conditions and the following disclaimers.
diff --git a/models/fates/DESCRIPTION b/models/fates/DESCRIPTION
index baa8dc55b88..a44c6011931 100644
--- a/models/fates/DESCRIPTION
+++ b/models/fates/DESCRIPTION
@@ -1,8 +1,7 @@
 Package: PEcAn.FATES
 Type: Package
 Title: PEcAn Package for Integration of FATES Model
-Version: 1.7.2
-Date: 2021-10-04
+Version: 1.7.3.9000
 Authors@R: c(person("Mike", "Dietze", role = c("aut", "cre"),
                      email = "dietze@bu.edu"),
              person("Shawn", "Serbin", role = c("aut"),
@@ -22,7 +21,8 @@ Imports:
     PEcAn.remote,
     PEcAn.utils,
     lubridate (>= 1.6.0),
-    ncdf4 (>= 1.15)
+    ncdf4 (>= 1.15),
+    tibble
 Suggests:
     testthat (>= 1.0.2)
 License: BSD_3_clause + file LICENSE
@@ -30,4 +30,4 @@ Copyright: Authors
 LazyLoad: yes
 LazyData: FALSE
 Encoding: UTF-8
-RoxygenNote: 7.2.3
+RoxygenNote: 7.3.2
diff --git a/models/fates/LICENSE b/models/fates/LICENSE
index 9e38c2dc685..09ef35a60b4 100644
--- a/models/fates/LICENSE
+++ b/models/fates/LICENSE
@@ -1,29 +1,3 @@
-University of Illinois/NCSA Open Source License
-
-Copyright (c) 2012, University of Illinois, NCSA. All rights reserved.
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal with the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-- Redistributions of source code must retain the above copyright
-  notice, this list of conditions and the following disclaimers.
-- Redistributions in binary form must reproduce the above copyright
-  notice, this list of conditions and the following disclaimers in the
-  documentation and/or other materials provided with the distribution.
-- Neither the names of University of Illinois, NCSA, nor the names
-  of its contributors may be used to endorse or promote products
-  derived from this Software without specific prior written permission.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR
-ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
-CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE.
-
+YEAR: 2024
+COPYRIGHT HOLDER: PEcAn Project
+ORGANIZATION: PEcAn Project, authors affiliations
diff --git a/models/fates/NEWS.md b/models/fates/NEWS.md
new file mode 100644
index 00000000000..ed267da8bfd
--- /dev/null
+++ b/models/fates/NEWS.md
@@ -0,0 +1,7 @@
+# PEcAn.FATES 1.7.3.9000
+
+## License change
+* PEcAn.FATES is now distributed under the BSD three-clause license instead of the NCSA Open Source license.
+
+## Added
+* Added a `NEWS.md` file to track changes to the package. Prior to this point changes are tracked in the main CHANGELOG for the PEcAn repository.
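The met2model.FATES rewrite below slices each CF met year into months using hard-coded cumulative day-of-year vectors. As a sanity check, those vectors can be derived from the month lengths:

    # Month-start day-of-year offsets, leap year: 0 31 60 91 121 152 182 213 244 274 305 335 366
    c(cumsum(c(0, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30)), 366)
    # Non-leap year: 0 31 59 90 120 151 181 212 243 273 304 334 365
    c(cumsum(c(0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30)), 365)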
diff --git a/models/fates/R/met2model.FATES.R b/models/fates/R/met2model.FATES.R
old mode 100644
new mode 100755
index df232462351..0398ff7a547
--- a/models/fates/R/met2model.FATES.R
+++ b/models/fates/R/met2model.FATES.R
@@ -1,11 +1,3 @@
-#-------------------------------------------------------------------------------
-# Copyright (c) 2016 NCSA.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the
-# University of Illinois/NCSA Open Source License
-# which accompanies this distribution, and is available at
-# http://opensource.ncsa.illinois.edu/license.html
-#-------------------------------------------------------------------------------
 
 # R Code to convert NetCDF CF met files into NetCDF FATES met files.
@@ -16,148 +8,175 @@
 ##' @param in.path location on disk where inputs are stored
 ##' @param in.prefix prefix of input and output files
 ##' @param outfolder location on disk where outputs will be stored
-##' @param start_date the start date of the data to be downloaded (will only use the year part of the date)
-##' @param end_date the end date of the data to be downloaded (will only use the year part of the date)
+##' @param start_date the start date of the data to be downloaded
+##' @param end_date the end date of the data to be downloaded
 ##' @param lst timezone offset to GMT in hours
+##' @param lat,lon latitude and longitude of site in decimal degrees
 ##' @param overwrite should existing files be overwritten
-##' @param verbose should the function be very verbosefor(year in start_year:end_year)
+##' @param verbose should the function be very verbose
+##' @param ... additional arguments, currently ignored
 ##' @importFrom ncdf4 ncvar_get ncdim_def ncatt_get ncvar_put
-met2model.FATES <- function(in.path, in.prefix, outfolder, start_date, end_date, lst = 0, lat, lon,
-                            overwrite = FALSE, verbose = FALSE, ...) {
-
+
+met2model.FATES <- function(in.path, in.prefix, outfolder, start_date, end_date, lst = 0,
+                            lat, lon, overwrite = FALSE, verbose = FALSE, ...)
+{
   # General Structure- FATES Uses Netcdf so we need to rename vars, split files from years into months, and generate the header file
   # Get Met file from inpath.
   # Loop over years (Open nc.file,rename vars,change dimensions as needed,close/save .nc file)
   # close
   # defining temporal dimension needs to be figured out. If we configure FATES to use same tstep then we may not need to change dimensions
 
-  insert <- function(ncout, name, unit, data) {
-    var <- ncdf4::ncvar_def(name = name, units = unit, dim = dim, missval = -6999, verbose = verbose)
-    ncout <- ncdf4::ncvar_add(nc = ncout, v = var, verbose = verbose)
-    ncvar_put(nc = ncout, varid = name, vals = data)
+  insert <- function(ncout, name, unit, data, dim) {
+    var <- ncdf4::ncvar_def(name, unit, dim = dim, missval = as.numeric(1.0e36), verbose = verbose)
+    ncout <- ncdf4::ncvar_add(ncout, var)
+    ncdf4::ncvar_put(nc = ncout, varid = name, vals = data)
     return(invisible(ncout))
   }
 
-  sm <- c(0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365) * 86400  ## day of year thresholds
 
   ## Create output directory
-  dir.create(outfolder)
+  if (!file.exists(outfolder)){
+    dir.create(outfolder)
+  }
 
-  # Process start and end dates
-  start_date <- as.POSIXlt(start_date, tz = "UTC")
-  end_date <- as.POSIXlt(end_date, tz = "UTC")
+  ## Process start, end dates
+  start_date <- as.POSIXlt(start_date, tz = "UTC", origin = "1700-01-01")
+  end_date <- as.POSIXlt(end_date, tz = "UTC", origin = "1700-01-01")
 
   start_year <- lubridate::year(start_date)
   end_year <- lubridate::year(end_date)
 
   ## Build met
   for (year in start_year:end_year) {
-
+
+    ## Process time
+    base_time <- difftime(paste0(year,"-01-01"), "1700-01-01", units="days") ## days between origin and start of year
+    if (lubridate::leap_year(year)){
+      sm <- c(0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366)
+    }
+    else {
+      sm <- c(0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365)
+    }
+
     in.file <- file.path(in.path, paste(in.prefix, year, "nc", sep = "."))
 
     if (file.exists(in.file)) {
-
+      ## Open netcdf file
       nc <- ncdf4::nc_open(in.file)
-
+      ## extract variables. These need to be read in and converted to CLM names (all units are correct)
-      time <- ncvar_get(nc, "time")
-      latitude <- ncvar_get(nc, "latitude")
-      longitude <- ncvar_get(nc, "longitude")
-      FLDS <- ncvar_get(nc, "surface_downwelling_longwave_flux_in_air")  ## W/m2
-      FSDS <- ncvar_get(nc, "surface_downwelling_shortwave_flux_in_air")  ## W/m2
-      PRECTmms <- ncvar_get(nc, "precipitation_flux")  ## kg/m2/s -> mm/s (same val, diff name)
-      PSRF <- ncvar_get(nc, "air_pressure")  ## Pa
-      SHUM <- ncvar_get(nc, "specific_humidity")  ## g/g -> kg/kg
-      TBOT <- ncvar_get(nc, "air_temperature")  ## K
-      WIND <- sqrt(ncvar_get(nc, "eastward_wind") ^ 2 + ncvar_get(nc, "northward_wind") ^ 2)  ## m/s
+      time <- ncdf4::ncvar_get(nc, "time")
+      LATIXY <- ncdf4::ncvar_get(nc, "latitude")
+      LONGXY <- ncdf4::ncvar_get(nc, "longitude")
+      FLDS <- ncdf4::ncvar_get(nc, "surface_downwelling_longwave_flux_in_air")  ## W/m2
+      FSDS <- ncdf4::ncvar_get(nc, "surface_downwelling_shortwave_flux_in_air")  ## W/m2
+      PRECTmms <- ncdf4::ncvar_get(nc, "precipitation_flux")  ## kg/m2/s -> mm/s (same val, diff name)
+      PSRF <- ncdf4::ncvar_get(nc, "air_pressure")  ## Pa
+      QBOT <- ncdf4::ncvar_get(nc, "specific_humidity")  ## g/g -> kg/kg
+      TBOT <- ncdf4::ncvar_get(nc, "air_temperature")  ## K
+      WIND <- sqrt(ncdf4::ncvar_get(nc, "eastward_wind") ^ 2 + ncdf4::ncvar_get(nc, "northward_wind") ^ 2)  ## m/s
 
       ## CREATE MONTHLY FILES
       for (mo in 1:12) {
-        tsel <- which(time > sm[mo] & time <= sm[mo + 1])
-        outfile <- file.path(outfolder, paste0(formatC(year, width = 4, flag = "0"), "-",
-                                               formatC(mo, width = 2, flag = "0"), ".nc"))
-        if (file.exists(outfile) & overwrite == FALSE) {
+        if (((year == start_year) & (mo < lubridate::month(start_date))) | ((year == end_year) & (mo > lubridate::month(end_date)))){
           next
         }
-
-        lat.dim <- ncdim_def(name = "latitude", units = "", vals = 1:1, create_dimvar = FALSE)
-        lon.dim <- ncdim_def(name = "longitude", units = "", vals = 1:1, create_dimvar = FALSE)
-        time.dim <- ncdim_def(name = "time", units = "seconds", vals = time,
-                              create_dimvar = TRUE, unlim = TRUE)
-        dim <- list(lat.dim, lon.dim, time.dim)  ## docs say this should be time,lat,lon but get error writing unlimited first
-        ## http://www.cesm.ucar.edu/models/cesm1.2/clm/models/lnd/clm/doc/UsersGuide/x12979.html
-
-        # LATITUDE
-        var <- ncdf4::ncvar_def(name = "latitude", units = "degree_north",
-                                dim = list(lat.dim, lon.dim), missval = as.numeric(-9999))
-        ncout <- ncdf4::nc_create(outfile, vars = var, verbose = verbose)
-        ncvar_put(nc = ncout, varid = "latitude", vals = latitude)
-
-        # LONGITUDE
-        var <- ncdf4::ncvar_def(name = "longitude", units = "degree_east",
-                                dim = list(lat.dim, lon.dim), missval = as.numeric(-9999))
-        ncout <- ncdf4::ncvar_add(nc = ncout, v = var, verbose = verbose)
-        ncvar_put(nc = ncout, varid = "longitude", vals = longitude)
-
-        ## surface_downwelling_longwave_flux_in_air
-        ncout <- insert(ncout, "FLDS", "W m-2", FLDS)
-
-        ## surface_downwelling_shortwave_flux_in_air
-        ncout <- insert(ncout, "FSDS", "W m-2", FSDS)
-
-        ## precipitation_flux
-        ncout <- insert(ncout, "PRECTmms", "mm/s", PRECTmms)
-
-        ## air_pressure
-        ncout <- insert(ncout, "PSRF", "Pa", PSRF)
-
-        ## specific_humidity
-        ncout <- insert(ncout, "SHUM", "kg/kg", SHUM)
-
-        ## air_temperature
-        ncout <- insert(ncout, "TBOT", "K", TBOT)
-
-        ## eastward_wind & northward_wind
-        ncout <- insert(ncout, "WIND", "m/s", WIND)
-
-        ncdf4::nc_close(ncout)
-
-        # ncvar_rename(ncfile,varid="LONGXY")
-        # ncvar_rename(ncfile,varid="LATIXY")
-        # #
-        # # double EDGEW(scalar) ;
-        # # EDGEW:long_name = "western edge in atmospheric data" ;
-        # # EDGEW:units = "degrees E" ;
-        # EDGEW = ncvar_rename(ncfile,"EDGEW","EDGEW")
-        #
-        # # double EDGEE(scalar) ;
-        # # EDGEE:long_name = "eastern edge in atmospheric data" ;
-        # # EDGEE:units = "degrees E" ;
-        # EDGEE = ncvar_rename(ncfile,"EDGEE","EDGEE")
-        #
-        # # double EDGES(scalar) ;
-        # # EDGES:long_name = "southern edge in atmospheric data" ;
-        # # EDGES:units = "degrees N" ;
-        # EDGES = ncvar_rename(ncfile,"EDGES","EDGES")
-        # #
-        # # double EDGEN(scalar) ;
-        # # EDGEN:long_name = "northern edge in atmospheric data" ;
-        # # EDGEN:units = "degrees N" ;
-        # EDGEN = ncvar_rename(ncfile,"EDGEN","EDGEN")
+        else {
+          # slice
+          tsel <- which(time > base_time+sm[mo] & time <= base_time+sm[mo+1])
+          print(mo)
+          if (length(tsel)!=0){
+            # define dim
+            lat.dim <- ncdf4::ncdim_def(name = "lat", units = "", vals = 1:1, create_dimvar=FALSE)
+            lon.dim <- ncdf4::ncdim_def(name = "lon", units = "", vals = 1:1, create_dimvar=FALSE)
+            time.dim <- ncdf4::ncdim_def(name = "time", units = "", vals = 1:length(time[tsel]), create_dimvar = TRUE, calendar="standard", unlim = FALSE) # left to CTSM to transfer automatically
+            scalar.dim <- ncdf4::ncdim_def(name="scalar", units = "", vals = 1:1)
+            dim <- list(time.dim, lat.dim, lon.dim)
+
+            # LATITUDE
+            var_lat <- ncdf4::ncvar_def(name = "LATIXY", units = "degree_north",
+                                        dim = list(lat.dim, lon.dim), missval = as.numeric(-9999))
+            # LONGITUDE
+            var_long <- ncdf4::ncvar_def(name = "LONGXY", units = "degree_east",
+                                         dim = list(lat.dim, lon.dim), missval = as.numeric(-9999))
+            # time
+            var_time <- ncdf4::ncvar_def(name = "time", units = "days since 1700-01-01", prec = "float",
+                                         dim = list(time.dim), missval = as.numeric(-9999))
+            # EDGEE
+            var_E <- ncdf4::ncvar_def(name = "EDGEE", units = "degrees_east",
+                                      dim = list(scalar.dim, lat.dim, lon.dim), missval = as.numeric(-9999))
+            # EDGEW: edge for resolution, edge-central 0.005 # could PEcAn provide the grid extent?
+            var_W <- ncdf4::ncvar_def(name = "EDGEW", units = "degrees_west",
+                                      dim = list(scalar.dim, lat.dim, lon.dim), missval = as.numeric(-9999))
+            # EDGES
+            var_S <- ncdf4::ncvar_def(name = "EDGES", units = "degrees_south",
+                                      dim = list(scalar.dim, lat.dim, lon.dim), missval = as.numeric(-9999))
+            # EDGEN
+            var_N <- ncdf4::ncvar_def(name = "EDGEN", units = "degrees_north",
+                                      dim = list(scalar.dim, lat.dim, lon.dim), missval = as.numeric(-9999))
+
+            ## SEPARATELY CREATE FILES
+            put_var <- function(ncout){
+              ncdf4::ncvar_put(nc = ncout, varid = "LATIXY", vals = LATIXY) # same with FATES
+              ncdf4::ncvar_put(nc = ncout, varid = "LONGXY", vals = LONGXY)
+              ncdf4::ncvar_put(nc = ncout, varid = "EDGEE", vals = LONGXY+0.005)
+              ncdf4::ncvar_put(nc = ncout, varid = "EDGEW", vals = LONGXY-0.005)
+              ncdf4::ncvar_put(nc = ncout, varid = "EDGES", vals = LATIXY-0.005)
+              ncdf4::ncvar_put(nc = ncout, varid = "EDGEN", vals = LATIXY+0.005)
+            }
+
+            ## Precipitation
+            outfile_prec <- file.path(outfolder, paste0("Prec", formatC(year, width = 4, flag = "0"), "-",
+                                                        formatC(mo, width = 2, flag = "0"), ".nc"))
+            if (file.exists(outfile_prec) & overwrite == FALSE) {
+              next
+            }
+            ncout_prec <- ncdf4::nc_create(outfile_prec, vars = list(var_lat,var_long,var_E,var_W,var_S,var_N), verbose = verbose)
+            put_var(ncout_prec)
+            ## precipitation_flux
+            ncout_prec <- insert(ncout_prec, "PRECTmms", "mm/s", PRECTmms[tsel], dim)
+            ncdf4::nc_close(ncout_prec)
+
+            ## Solar
+            outfile_slr <- file.path(outfolder, paste0("Slr", formatC(year, width = 4, flag = "0"), "-",
+                                                       formatC(mo, width = 2, flag = "0"), ".nc"))
+            if (file.exists(outfile_slr) & overwrite == FALSE) {
+              next
+            }
+            ncout_slr <- ncdf4::nc_create(outfile_slr, vars = list(var_lat,var_long,var_E,var_W,var_S,var_N), verbose = verbose)
+            put_var(ncout_slr)
+            ## surface_downwelling_shortwave_flux_in_air
+            ncout_slr <- insert(ncout_slr, "FSDS", "W m-2", FSDS[tsel], dim)
+            ncdf4::nc_close(ncout_slr)
+
+            ## Temperature and humidity
+            outfile_tem <- file.path(outfolder, paste0("Tem", formatC(year, width = 4, flag = "0"), "-",
+                                                       formatC(mo, width = 2, flag = "0"), ".nc"))
+            if (file.exists(outfile_tem) & overwrite == FALSE) {
+              next
+            }
+            ncout_tem <- ncdf4::nc_create(outfile_tem, vars = list(var_lat,var_long,var_E,var_W,var_S,var_N), verbose = verbose)
+            put_var(ncout_tem)
+            ## surface_downwelling_longwave_flux_in_air
+            ncout_tem <- insert(ncout_tem, "FLDS", "W m-2", FLDS[tsel], dim)
+            ## air_pressure
+            ncout_tem <- insert(ncout_tem, "PSRF", "Pa", PSRF[tsel], dim)
+            ## specific_humidity
+            ncout_tem <- insert(ncout_tem, "QBOT", "kg/kg", QBOT[tsel], dim)
+            ## air_temperature
+            ncout_tem <- insert(ncout_tem, "TBOT", "K", TBOT[tsel], dim)
+            ## eastward_wind & northward_wind
+            ncout_tem <- insert(ncout_tem, "WIND", "m/s", WIND[tsel], dim)
+            ncdf4::nc_close(ncout_tem)
+          }
+        }
       }
-      ncdf4::nc_close(nc)
-    } ## end file exists
-  } ### end loop over met files
+      ncdf4::nc_close(nc)
+    } ## end input file
+  } ## end year loop over met files
+
+  results <- data.frame(file = paste0(outfolder, "/"),
+                        host = c(PEcAn.remote::fqdn()),
+                        mimetype = c("application/x-netcdf"),
+                        formatname = c("CLM met"),
+                        startdate = c(start_date),
+                        enddate = c(end_date),
+                        dbfile.name = "",
+                        stringsAsFactors = FALSE)
   PEcAn.logger::logger.info("Done with met2model.FATES")
-
-  return(data.frame(file = paste0(outfolder, "/"),
-                    host = c(PEcAn.remote::fqdn()),
-                    mimetype = c("application/x-netcdf"),
-                    formatname = c("CLM met"),
-                    startdate = c(start_date),
-                    enddate = c(end_date),
-                    dbfile.name = "",
-                    stringsAsFactors = FALSE))
+  return(invisible(results))
 } # met2model.FATES
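A hedged sketch of driving the rewritten converter above; the paths, prefix, and dates are illustrative only, not from this PR. Per the code, it writes three CLM-style streams per month (PrecYYYY-MM.nc, SlrYYYY-MM.nc, TemYYYY-MM.nc) under outfolder and invisibly returns one row of file metadata:

    # Illustrative call; assumes CF met files named <in.prefix>.<year>.nc under in.path
    res <- met2model.FATES(in.path = "/data/cf_met", in.prefix = "US-WCr",
                           outfolder = "/data/fates_met",
                           start_date = "2004-01-01", end_date = "2005-12-31",
                           lat = 45.8, lon = -90.08)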
diff --git a/models/fates/R/model2netcdf.FATES.R b/models/fates/R/model2netcdf.FATES.R
index b85cf946d38..382c23056f1 100644
--- a/models/fates/R/model2netcdf.FATES.R
+++ b/models/fates/R/model2netcdf.FATES.R
@@ -1,590 +1,167 @@
-#-------------------------------------------------------------------------------
-# Copyright (c) 2016 NCSA.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the
-# University of Illinois/NCSA Open Source License
-# which accompanies this distribution, and is available at
-# http://opensource.ncsa.illinois.edu/license.html
-#-------------------------------------------------------------------------------
 ##' @name model2netcdf.FATES
 ##' @title Code to convert FATES netcdf output into CF standard
 ##'
-##' @param outdir Location of FATES model output
+##' @param outdir Location of FATES model output (e.g. a path to a single ensemble output)
+##' @param sitelat Latitude of the site
+##' @param sitelon Longitude of the site
+##' @param start_date Start time of the simulation, not a string
+##' @param end_date End time of the simulation, not a string
+##' @param vars_names Names of selected variables in PEcAn format (e.g. c("GPP", "NPP"))
+##' @param pfts a named vector of PFT numbers where the names are PFT names
 ##'
 ##' @examples
 ##' \dontrun{
 ##' example.output <- system.file("case.clm2.h0.2004-01-01-00000.nc",package="PEcAn.FATES")
-##' model2netcdf.FATES(outdir="~/")
+##' model2netcdf.FATES(outdir="~/",sitelat, sitelon, start_date, end_date, vars_names, pfts)
 ##' }
-##'
-##' @export
 ##'
 ##' @author Michael Dietze, Shawn Serbin
-model2netcdf.FATES <- function(outdir) {
+## modified Yucong Hu 22/07/24
+##'
+##' @export
var_update("AR","AutoResp","kgC m-2 s-1", "Autotrophic Respiration") - # currently only works for xyt variables, need to expand to work for cohort-level outputs, - # age bins, soils, etc - var_update <- function(out,oldname,newname,newunits=NULL,long_name=NULL){ - if (oldname %in% ncin_names) { - ## define variable - oldunits <- ncdf4::ncatt_get(ncin,oldname,"units")$value - if (oldunits=="gC/m^2/s") oldunits <- "gC m-2 s-1" - if (oldname=="TLAI" && oldunits=="none") oldunits <- "m2 m-2" - if(is.null(newunits)) newunits = oldunits - newvar <- ncdf4::ncvar_def(name = newname, units = newunits, longname=long_name, dim = xyt) - - ## convert data - dat <- ncdf4::ncvar_get(ncin,oldname) - dat.new <- PEcAn.utils::misc.convert(dat,oldunits,newunits) - - ## prep for writing - if(is.null(out)) { - out <- list(var <- list(),dat <- list()) - out$var[[1]] <- newvar - out$dat[[1]] <- dat.new - } else { - i <- length(out$var) + 1 - out$var[[i]] <- newvar - out$dat[[i]] <- dat.new - } +model2netcdf.FATES <- function(outdir, sitelat, sitelon, start_date, end_date, vars_names, pfts){ + ## Tips: matched_var could be expanded for more selected variables + matched_var <- tibble::tribble( + ~fatesname, ~pecanname, ~pecanunits, ~longname, + "FATES_GPP_PF","GPP","kgC m-2 s-1","Gross Primary Productivity", + "FATES_NPP_PF","NPP","kg m-2 yr-1", "Total PFT-level NPP in kg carbon per m2 land area per second", + "NEE","NEE","kgC m-2 s-1", "Net Ecosystem Exchange of carbon, includes fire and hrv_xsmrpool", + "TLAI","LAI","m2 m-2","Total projected leaf area index", + "ER","TotalResp","kgC m-2 s-1","Total Respiration", + "AR","AutoResp","kgC m-2 s-1","Autotrophic respiration (MR + GR)", + "HR","HeteroResp","kgC m-2 s-1","Total heterotrophic respiration", + "SR","SoilResp","kgC m-2 s-1","Total soil respiration (HR + root resp)", + "Qle","Evap","kgC m-2 s-1","Total evaporation", + "QVEGT","Transp","kg m-2 s-1","Canopy transpiration") + + ## Update unit, dimension and + var_update <- function(out,oldname,newname,nc_month,nc_month_names,newunits=NULL,long_name=NULL){ + if (oldname %in% nc_month_names) { + + ## define units of variables + oldunits <- ncdf4::ncatt_get(nc_month,oldname,"units")$value + if (oldunits=="gC/m^2/s") oldunits <- "gC m-2 s-1" + if (oldname=="TLAI") oldunits <- "m2 m-2" # delete old unit ='none' + if (is.null(newunits)) newunits = oldunits + + ## check pft dimensions + d_name <- c() + for (i in (nc_month$var[[oldname]]$dim)){ + d_name <- append(d_name, i$name) + } + if (any(grepl('pft',d_name))){ + dimension <- xypt # include fates_levpft + }else{ + dimension <- xyt # only xyt + } + + ## transpose dimensions into (,t) + if (d_name[length(d_name)]=='time'){ + dat_0 <- ncdf4::ncvar_get(nc_month,oldname) # time at the tail of dims + dat.new <- PEcAn.utils::misc.convert(dat_0,oldunits,newunits) # convert data units + } + newvar <- ncdf4::ncvar_def(name = newname, units = newunits, longname=long_name, dim = dimension) + + ## adding target variables into out + if(is.null(out)) { + out <- list(var <- list(),dat <- list(), dimm<-list()) + out$var[[1]] <- newvar + out$dat[[1]] <- dat.new + out$dimm[[1]]<- length(dimension) } else { - ## correct way to "skip" and output variables that may be missing in the HLM-FATES output? - PEcAn.logger::logger.info(paste0("HLM-FATES variable: ", oldname," not present. 
Skipping conversion")) + i <- length(out$var) + 1 + out$var[[i]] <- newvar + out$dat[[i]] <- dat.new + out$dimm[[i]]<- length(dimension) } - return(out) + return(out) } + } - ## Get files and years - files <- dir(outdir, "*clm2.h0.*.nc", full.names = TRUE) # currently specific to clm2.h0 files - file.dates <- as.Date(sub(".nc", "", sub(".*clm2.h0.", "", files))) - years <- lubridate::year(file.dates) - init_year <- unique(years)[1] + ## Get files and years + files <- dir(outdir, "*clm2.h0.*.nc", full.names = TRUE) # currently specific to clm2.h0 files + start_year <- lubridate::year(start_date) + end_year <- lubridate::year(end_date) + start_month <- lubridate::month(start_date) + end_month <- lubridate::month(end_date) - ## Loop over years - for (year in unique(years)) { - ysel <- which(years == year) ## subselect files for selected year - if (length(ysel) > 1) { - PEcAn.logger::logger.warn("PEcAn.FATES::model2netcdf.FATES does not currently support multiple files per year") + ## Loop over years + for (year in start_year:end_year){ + oname <- file.path(dirname(files[1]), paste0(year, ".nc")) + out <- NULL + + ## monthly write files + for (mo in 1:12){ + if (((year == start_year) & mo < start_month) | ((year == end_year) & mo > end_month)){ + next ## skip unselected months + } + else{ + if (mo < 10){ + month_file <- paste0(gsub("h0.*.nc","",files[1]),"h0.",year,"-0",mo,".nc") + }else{ + month_file <- paste0(gsub("h0.*.nc","",files[1]),"h0.",year,"-",mo,".nc") } + nc_month <- ncdf4::nc_open(month_file) # read monthly output file of FATES model + nc_month_names <- names(nc_month$var) + + ## create time bounds to populate time_bounds variable iteratively + var_bound <- ncdf4::ncvar_get(nc_month, "time_bounds") # start,end day of month - fname <- files[ysel[1]] - oname <- file.path(dirname(fname), paste0(year, ".nc")) - PEcAn.logger::logger.info(paste("model2netcdf.FATES - Converting:", fname, "to", oname)) - ncin <- ncdf4::nc_open(fname, write = TRUE) - ncin_names <- names(ncin$var) # get netCDF variable names in HLM-FATES output - + ## define dimensions + t <- ncdf4::ncdim_def(name = "time", units = "days since 1700-01-01 00:00:00", + vals = as.double(1.0:1.0), calendar = "noleap", unlim = TRUE) + time_interval <- ncdf4::ncdim_def(name = "hist_interval", + longname = "history time interval endpoint dimensions",vals = 1:2, units = "") + lat <- ncdf4::ncdim_def("lat", "degrees_north", vals = as.double(1.0:1.0), longname = "coordinate_latitude") + lon <- ncdf4::ncdim_def("lon", "degrees_east", vals = as.double(1.0:1.0), longname = "coordinate_longitude") + pft <- ncdf4::ncdim_def('pft', '', vals=1:12, longname = "FATES pft number") + xyt <- list(lon, lat, t) + xypt <- list(lon, lat, pft, t) - ## FATES time is in multiple columns, create 'time' - mcdate <- ncdf4::ncvar_get(ncin, "mcdate") # current date (YYYYMMDD) - if (length(mcdate)==1) { - ## do we need to bother converting outputs where FATES provides only a single timepoint for a date? 
- ## usually happens when the model starts/finishes at the end/start of a new year - PEcAn.logger::logger.debug("*** Skipping conversion for output with only a single timepoint ***") - next + ## write monthly files with start(1,1,i) + for (var_s in vars_names){ + matched_ind <- which(matched_var$pecanname == var_s) + out <- var_update(out, matched_var$fatesname[matched_ind],matched_var$pecanname[matched_ind], + nc_month,nc_month_names,matched_var$pecanunits[matched_ind],matched_var$longname[matched_ind]) } - cal_dates <- as.Date(as.character(mcdate),format="%Y%m%d") # in standard YYYY-MM-DD format - julian_dates <- lubridate::yday(cal_dates) # current year DOY values - day <- ncdf4::ncvar_get(ncin, "mdcur") # current day (from base day) - sec <- ncdf4::ncvar_get(ncin, "mscur") # current seconds of current day - nstep <- ncdf4::ncvar_get(ncin, "nstep") # model time step - time <- day + sec / 86400 # fractional time since base date (typically first day of full model simulation) - iter_per_day <- length(unique(sec)) # how many outputs per day (e.g. 1, 24, 48) - timesteps <- utils::head(seq(0, 1, by = 1 / iter_per_day), -1) # time of day fraction - current_year_tvals <- (julian_dates-1 + timesteps) # fractional DOY of current year - nt <- length(time) # output length - nc_time <- ncin$dim$time$vals # days since "start_date" + out$var[[length(out$var) + 1]] <- ncdf4::ncvar_def(name="time_bounds", units='', + longname = "history time interval endpoints", dim=list(time_interval,t), prec = "double") + out$dat[[length(out$dat) + 1]] <- c(rbind(var_bound[1], var_bound[2])) #start, end days of the year + out$dimm[[length(out$dimm) + 1]] <- 2 - # !! Is this a useful/reasonable check? That is that our calculated time - # matches FATES internal time var. - if (length(time)!=length(nc_time)) { - PEcAn.logger::logger.severe("Time dimension mismatch in output, simulation error?") + ## define vars + if (((year != start_year) & (mo == 1)) | ((year == start_year) & (mo == start_month))){ + ncout <- ncdf4::nc_create(oname, out$var) # create yearly nc file + time_var <- ncdf4::ncvar_def(name = "time", units = "days since 1700-01-01 00:00:00",longname = "time", dim = list(t), prec = "double") + lat_var <- ncdf4::ncvar_def(name = "lat", units = "degrees_north", longname = "coordinate_latitude", dim = list(lat), prec = "double") + lon_var <- ncdf4::ncvar_def(name = "lon", units = "degrees_east", longname = "coordinate_longitude", dim = list(lon), prec = "double") + + ncdf4::ncvar_put(ncout, lat_var, sitelat, start = c(1)) + ncdf4::ncvar_put(ncout, lon_var, sitelon, start = c(1)) } - ## Create time bounds to populate time_bounds variable - bounds <- array(data = NA, dim = c(length(time), 2)) - bounds[, 1] <- time - bounds[, 2] <- bounds[, 1] + (1 / iter_per_day) - bounds <- round(bounds, 4) # create time bounds for each timestep in t, t+1; t+1, t+2... 
format + ## put time and vars + ncdf4::ncvar_put(ncout, time_var, mean(var_bound), start=c(mo), count=c(1)) - #******************** Declare netCDF dimensions ********************# - nc_var <- list() - sitelat <- ncdf4::ncvar_get(ncin,"lat") - sitelon <- ncdf4::ncvar_get(ncin,"lon") - ## time variable based on internal calc, nc$dim$time is the FATES output time - t <- ncdf4::ncdim_def(name = "time", units = paste0("days since ", init_year, "-01-01 00:00:00"), - vals = as.vector(time), calendar = "noleap", unlim = TRUE) - time_interval <- ncdf4::ncdim_def(name = "hist_interval", - longname = "history time interval endpoint dimensions", - vals = 1:2, units = "") - lat <- ncdf4::ncdim_def("lat", "degrees_north", vals = as.numeric(sitelat), longname = "coordinate_latitude") - lon <- ncdf4::ncdim_def("lon", "degrees_east", vals = as.numeric(sitelon), longname = "coordinate_longitude") - xyt <- list(lon, lat, t) - - ### build netCDF data - ## !! TODO: ADD MORE OUTPUTS HERE - out <- NULL - out <- var_update(out,"AR","AutoResp","kgC m-2 s-1","Autotrophic Respiration") - out <- var_update(out,"HR","HeteroResp","kgC m-2 s-1","Heterotrophic Respiration") - out <- var_update(out,"GPP","GPP","kgC m-2 s-1","Gross Primary Productivity") - out <- var_update(out,"NPP","NPP","kgC m-2 s-1","Net Primary Productivity") - out <- var_update(out,"NEP","NEE","kgC m-2 s-1", "Net Ecosystem Exchange") - out <- var_update(out,"FLDS","LWdown","W m-2","Surface incident longwave radiation") - out <- var_update(out,"FSDS","SWdown","W m-2","Surface incident shortwave radiation") - out <- var_update(out,"TBOT","Tair","K","Near surface air temperature") # not certain these are equivelent yet - out <- var_update(out,"QBOT","Qair","kg kg-1","Near surface specific humidity") # not certain these are equivelent yet - out <- var_update(out,"RH","RH","%","Relative Humidity") - out <- var_update(out,"WIND","Wind","m s-1","Near surface module of the wind") # not certain these are equivelent yet - out <- var_update(out,"EFLX_LH_TOT","Qle","W m-2","Latent heat") - out <- var_update(out,"QVEGT","Transp","mm s-1","Total Transpiration") ## equiv to std of kg m-2 s but don't trust udunits to get right - out <- var_update(out,"ED_balive","TotLivBiom","kgC m-2","Total living biomass") - out <- var_update(out,"ED_biomass","AbvGrndWood","kgC m-2","Above ground woody biomass") # not actually correct, need to update - out <- var_update(out,"AGB","AGB","kgC m-2","Total aboveground biomass") # not actually correct, need to update - out <- var_update(out,"ED_bleaf","leaf_carbon_content","kgC m-2","Leaf Carbon Content") - out <- var_update(out,"TLAI","LAI","m2 m-2","Leaf Area Index") - out <- var_update(out,"TSOI_10CM","SoilTemp","K","Average Layer Soil Temperature at 10cm") - - ## put in time_bounds before writing out new nc file - length(out$var) - out$var[[length(out$var) + 1]] <- ncdf4::ncvar_def(name="time_bounds", units='', - longname = "history time interval endpoints", - dim=list(time_interval,time = t), - prec = "double") - out$dat[[length(out$dat) + 1]] <- c(rbind(bounds[, 1], bounds[, 2])) - - ## close input nc file - try(ncdf4::nc_close(ncin)) - - ## write netCDF data - ncout <- ncdf4::nc_create(oname,out$var) - ncdf4::ncatt_put(ncout, "time", "bounds", "time_bounds", prec=NA) for (i in seq_along(out$var)) { - ncdf4::ncvar_put(ncout, out$var[[i]], out$dat[[i]]) - } - - ## extract variable and long names to VAR file for PEcAn vis - utils::write.table(sapply(ncout$var, function(x) { x$longname }), - file = paste0(oname, ".var"), - col.names 
= FALSE, - row.names = TRUE, - quote = FALSE) - - - try(ncdf4::nc_close(ncout)) - - } # end of year for loop -} # model2netcdf.FATES - -### !!! NOTES -### extract variables. These need to be read in and converted to PEcAN standard - -# levgrnd:long_name = "coordinate soil levels" ; -# levlak:long_name = "coordinate lake levels" ; -# levdcmp:long_name = "coordinate soil levels" ; -# mcdate:long_name = "current date (YYYYMMDD)" ; -# mcsec:long_name = "current seconds of current date" ; -# mdcur:long_name = "current day (from base day)" ; -# mscur:long_name = "current seconds of current day" ; -# nstep:long_name = "time step" ; -# lon:long_name = "coordinate longitude" ; -# lat:long_name = "coordinate latitude" ; -# area:long_name = "grid cell areas" ; -# topo:long_name = "grid cell topography" ; -# landfrac:long_name = "land fraction" ; -# landmask:long_name = "land/ocean mask (0.=ocean and 1.=land)" ; -# pftmask:long_name = "pft real/fake mask (0.=fake and 1.=real)" ; -# ZSOI:long_name = "soil depth" ; -# DZSOI:long_name = "soil thickness" ; -# WATSAT:long_name = "saturated soil water content (porosity)" ; -# SUCSAT:long_name = "saturated soil matric potential" ; -# BSW:long_name = "slope of soil water retention curve" ; -# HKSAT:long_name = "saturated hydraulic conductivity" ; -# ZLAKE:long_name = "lake layer node depth" ; -# DZLAKE:long_name = "lake layer thickness" ; -# ACTUAL_IMMOB:long_name = "actual N immobilization" ; -# AGNPP:long_name = "aboveground NPP" ; -# ALT:long_name = "current active layer thickness" ; -# ALTMAX:long_name = "maximum annual active layer thickness" ; -# ALTMAX_LASTYEAR:long_name = "maximum prior year active layer thickness" ; -# AR:long_name = "autotrophic respiration (MR + GR)" ; -# BAF_CROP:long_name = "fractional area burned for crop" ; -# BAF_PEATF:long_name = "fractional area burned in peatland" ; -# BCDEP:long_name = "total BC deposition (dry+wet) from atmosphere" ; -# BGNPP:long_name = "belowground NPP" ; -# BUILDHEAT:long_name = "heat flux from urban building interior to walls and roof" ; -# COL_CTRUNC:long_name = "column-level sink for C truncation" ; -# COL_FIRE_CLOSS:long_name = "total column-level fire C loss for non-peat fires outside land-type converted region" ; -# COL_FIRE_NLOSS:long_name = "total column-level fire N loss" ; -# COL_NTRUNC:long_name = "column-level sink for N truncation" ; -# CPOOL:long_name = "temporary photosynthate C pool" ; -# CWDC:long_name = "CWD C" ; -# CWDC_HR:long_name = "coarse woody debris C heterotrophic respiration" ; -# CWDC_LOSS:long_name = "coarse woody debris C loss" ; -# CWDC_TO_LITR2C:long_name = "decomp. of coarse woody debris C to litter 2 C" ; -# CWDC_TO_LITR3C:long_name = "decomp. of coarse woody debris C to litter 3 C" ; -# CWDN:long_name = "CWD N" ; -# CWDN_TO_LITR2N:long_name = "decomp. of coarse woody debris N to litter 2 N" ; -# CWDN_TO_LITR3N:long_name = "decomp. 
of coarse woody debris N to litter 3 N" ; -# DEADCROOTC:long_name = "dead coarse root C" ; -# DEADCROOTN:long_name = "dead coarse root N" ; -# DEADSTEMC:long_name = "dead stem C" ; -# DEADSTEMN:long_name = "dead stem N" ; -# DENIT:long_name = "total rate of denitrification" ; -# DISPVEGC:long_name = "displayed veg carbon, excluding storage and cpool" -# DISPVEGN:long_name = "displayed vegetation nitrogen" ; -# DSTDEP:long_name = "total dust deposition (dry+wet) from atmosphere" ; -# DSTFLXT:long_name = "total surface dust emission" ; -# DWT_CLOSS:long_name = "total carbon loss from land cover conversion" ; -# DWT_CONV_CFLUX:long_name = "conversion C flux (immediate loss to atm)" ; -# DWT_CONV_NFLUX:long_name = "conversion N flux (immediate loss to atm)" ; -# DWT_NLOSS:long_name = "total nitrogen loss from landcover conversion" ; -# DWT_PROD100C_GAIN:long_name = "landcover change-driven addition to 100-yr wood product pool" ; -# DWT_PROD100N_GAIN:long_name = "addition to 100-yr wood product pool" ; -# DWT_PROD10C_GAIN:long_name = "landcover change-driven addition to 10-yr wood product pool" ; -# DWT_PROD10N_GAIN:long_name = "addition to 10-yr wood product pool" ; -# DWT_SEEDC_TO_DEADSTEM:long_name = "seed source to patch-level deadstem" ; -# DWT_SEEDC_TO_LEAF:long_name = "seed source to patch-level leaf" ; -# DWT_SEEDN_TO_DEADSTEM:long_name = "seed source to PFT-level deadstem" ; -# DWT_SEEDN_TO_LEAF:long_name = "seed source to PFT-level leaf" ; -# EFLX_DYNBAL:long_name = "dynamic land cover change conversion energy flux" ; -# EFLX_GRND_LAKE:long_name = "net heat flux into lake/snow surface, excluding light transmission" ; -# EFLX_LH_TOT:long_name = "total latent heat flux [+ to atm]" ; -# EFLX_LH_TOT_R:long_name = "Rural total evaporation" ; -# EFLX_LH_TOT_U:long_name = "Urban total evaporation" ; -# ELAI:long_name = "exposed one-sided leaf area index" ; -# ER:long_name = "total ecosystem respiration, autotrophic + heterotrophic" ; -# ERRH2O:long_name = "total water conservation error" ; -# ERRH2OSNO:long_name = "imbalance in snow depth (liquid water)" ; -# ERRSEB:long_name = "surface energy conservation error" ; -# ERRSOI:long_name = "soil/lake energy conservation error" ; -# ERRSOL:long_name = "solar radiation conservation error" ; -# ESAI:long_name = "exposed one-sided stem area index" ; -# FAREA_BURNED:long_name = "timestep fractional area burned" ; -# FCEV:long_name = "canopy evaporation" ; -# FCOV:long_name = "fractional impermeable area" ; -# FCTR:long_name = "canopy transpiration" ; -# FGEV:long_name = "ground evaporation" ; -# FGR:long_name = "heat flux into soil/snow including snow melt and lake / snow light transmission" ; -# FGR12:long_name = "heat flux between soil layers 1 and 2" ; -# FGR_R:long_name = "Rural heat flux into soil/snow including snow melt and snow light transmission" ; -# FGR_U:long_name = "Urban heat flux into soil/snow including snow melt" ; -# FH2OSFC:long_name = "fraction of ground covered by surface water" ; -# FIRA:long_name = "net infrared (longwave) radiation" ; -# FIRA_R:long_name = "Rural net infrared (longwave) radiation" ; -# FIRA_U:long_name = "Urban net infrared (longwave) radiation" ; -# FIRE:long_name = "emitted infrared (longwave) radiation" ; -# FIRE_R:long_name = "Rural emitted infrared (longwave) radiation" ; -# FIRE_U:long_name = "Urban emitted infrared (longwave) radiation" ; -# FLDS:long_name = "atmospheric longwave radiation" ; -# FPG:long_name = "fraction of potential gpp" ; -# FPI:long_name = "fraction of potential immobilization" ; -# 
FPSN:long_name = "photosynthesis" ; -# FPSN_WC:long_name = "Rubisco-limited photosynthesis" ; -# FPSN_WJ:long_name = "RuBP-limited photosynthesis" ; -# FPSN_WP:long_name = "Product-limited photosynthesis" ; -# FROOTC:long_name = "fine root C" ; -# FROOTC_ALLOC:long_name = "fine root C allocation" ; -# FROOTC_LOSS:long_name = "fine root C loss" ; -# FROOTN:long_name = "fine root N" ; -# FSA:long_name = "absorbed solar radiation" ; -# FSAT:long_name = "fractional area with water table at surface" ; -# FSA_R:long_name = "Rural absorbed solar radiation" ; -# FSA_U:long_name = "Urban absorbed solar radiation" ; -# FSDS:long_name = "atmospheric incident solar radiation" ; -# FSDSND:long_name = "direct nir incident solar radiation" ; -# FSDSNDLN:long_name = "direct nir incident solar radiation at local noon" ; -# FSDSNI:long_name = "diffuse nir incident solar radiation" ; -# FSDSVD:long_name = "direct vis incident solar radiation" ; -# FSDSVDLN:long_name = "direct vis incident solar radiation at local noon" ; -# FSDSVI:long_name = "diffuse vis incident solar radiation" ; -# FSDSVILN:long_name = "diffuse vis incident solar radiation at local noon" ; -# FSH:long_name = "sensible heat" ; -# FSH_G:long_name = "sensible heat from ground" ; -# FSH_NODYNLNDUSE:long_name = "sensible heat not including correction for land use change" ; -# FSH_R:long_name = "Rural sensible heat" ; -# FSH_U:long_name = "Urban sensible heat" ; -# FSH_V:long_name = "sensible heat from veg" ; -# FSM:long_name = "snow melt heat flux" ; -# FSM_R:long_name = "Rural snow melt heat flux" ; -# FSM_U:long_name = "Urban snow melt heat flux" ; -# FSNO:long_name = "fraction of ground covered by snow" ; -# FSNO_EFF:long_name = "effective fraction of ground covered by snow" ; -# FSR:long_name = "reflected solar radiation" ; -# FSRND:long_name = "direct nir reflected solar radiation" ; -# FSRNDLN:long_name = "direct nir reflected solar radiation at local noon" ; -# FSRNI:long_name = "diffuse nir reflected solar radiation" ; -# FSRVD:long_name = "direct vis reflected solar radiation" ; -# FSRVDLN:long_name = "direct vis reflected solar radiation at local noon" ; -# FSRVI:long_name = "diffuse vis reflected solar radiation" ; -# FUELC:long_name = "fuel load" ; -# GC_HEAT1:long_name = "initial gridcell total heat content" ; -# GC_ICE1:long_name = "initial gridcell total ice content" ; -# GC_LIQ1:long_name = "initial gridcell total liq content" ; -# GPP:long_name = "gross primary production" ; -# GR:long_name = "total growth respiration" ; -# GROSS_NMIN:long_name = "gross rate of N mineralization" ; -# H2OCAN:long_name = "intercepted water" ; -# H2OSFC:long_name = "surface water depth" ; -# H2OSNO:long_name = "snow depth (liquid water)" ; -# H2OSNO_TOP:long_name = "mass of snow in top snow layer" ; -# HC:long_name = "heat content of soil/snow/lake" ; -# HCSOI:long_name = "soil heat content" ; -# HEAT_FROM_AC:long_name = "sensible heat flux put into canyon due to heat removed from air conditioning" ; -# HR:long_name = "total heterotrophic respiration" ; -# HTOP:long_name = "canopy top" ; -# LAISHA:long_name = "shaded projected leaf area index" ; -# LAISUN:long_name = "sunlit projected leaf area index" ; -# LAKEICEFRAC:long_name = "lake layer ice mass fraction" ; -# LAKEICETHICK:long_name = "thickness of lake ice (including physical expansion on freezing)" ; -# LAND_UPTAKE:long_name = "NEE minus LAND_USE_FLUX, negative for update" ; -# LAND_USE_FLUX:long_name = "total C emitted from land cover conversion and wood product pools" ; -# 
LEAFC:long_name = "leaf C" ; -# LEAFC_ALLOC:long_name = "leaf C allocation" ; -# LEAFC_LOSS:long_name = "leaf C loss" ; -# LEAFN:long_name = "leaf N" ; -# LEAF_MR:long_name = "leaf maintenance respiration" ; -# LFC2:long_name = "conversion area fraction of BET and BDT that burned" ; -# LF_CONV_CFLUX:long_name = "conversion carbon due to BET and BDT area decreasing" ; -# LITFALL:long_name = "litterfall (leaves and fine roots)" ; -# LITHR:long_name = "litter heterotrophic respiration" ; -# LITR1C:long_name = "LITR1 C" ; -# LITR1C_TO_SOIL1C:long_name = "decomp. of litter 1 C to soil 1 C" ; -# LITR1N:long_name = "LITR1 N" ; -# LITR1N_TNDNCY_VERT_TRANS:long_name = "litter 1 N tendency due to vertical transport" ; -# LITR1N_TO_SOIL1N:long_name = "decomp. of litter 1 N to soil 1 N" ; -# LITR1_HR:long_name = "Het. Resp. from litter 1" ; -# LITR2C:long_name = "LITR2 C" ; -# LITR2C_TO_SOIL2C:long_name = "decomp. of litter 2 C to soil 2 C" ; -# LITR2N:long_name = "LITR2 N" ; -# LITR2N_TNDNCY_VERT_TRANS:long_name = "litter 2 N tendency due to vertical transport" ; -# LITR2N_TO_SOIL2N:long_name = "decomp. of litter 2 N to soil 2 N" ; -# LITR2_HR:long_name = "Het. Resp. from litter 2" ; -# LITR3C:long_name = "LITR3 C" ; -# LITR3C_TO_SOIL3C:long_name = "decomp. of litter 3 C to soil 3 C" ; -# LITR3N:long_name = "LITR3 N" ; -# LITR3N_TNDNCY_VERT_TRANS:long_name = "litter 3 N tendency due to vertical transport" ; -# LITR3N_TO_SOIL3N:long_name = "decomp. of litter 3 N to soil 3 N" ; -# LITR3_HR:long_name = "Het. Resp. from litter 3" ; -# LITTERC:long_name = "litter C" ; -# LITTERC_HR:long_name = "litter C heterotrophic respiration" ; -# LITTERC_LOSS:long_name = "litter C loss" ; -# LIVECROOTC:long_name = "live coarse root C" ; -# LIVECROOTN:long_name = "live coarse root N" ; -# LIVESTEMC:long_name = "live stem C" ; -# LIVESTEMN:long_name = "live stem N" ; -# MEG_acetaldehyde:long_name = "MEGAN flux" ; -# MEG_acetic_acid:long_name = "MEGAN flux" ; -# MEG_acetone:long_name = "MEGAN flux" ; -# MEG_carene_3:long_name = "MEGAN flux" ; -# MEG_ethanol:long_name = "MEGAN flux" ; -# MEG_formaldehyde:long_name = "MEGAN flux" ; -# MEG_isoprene:long_name = "MEGAN flux" ; -# MEG_methanol:long_name = "MEGAN flux" ; -# MEG_pinene_a:long_name = "MEGAN flux" ; -# MEG_thujene_a:long_name = "MEGAN flux" ; -# MR:long_name = "maintenance respiration" ; -# M_LITR1C_TO_LEACHING:long_name = "litter 1 C leaching loss" ; -# M_LITR2C_TO_LEACHING:long_name = "litter 2 C leaching loss" ; -# M_LITR3C_TO_LEACHING:long_name = "litter 3 C leaching loss" ; -# M_SOIL1C_TO_LEACHING:long_name = "soil 1 C leaching loss" ; -# M_SOIL2C_TO_LEACHING:long_name = "soil 2 C leaching loss" ; -# M_SOIL3C_TO_LEACHING:long_name = "soil 3 C leaching loss" ; -# M_SOIL4C_TO_LEACHING:long_name = "soil 4 C leaching loss" ; -# NBP:long_name = "net biome production, includes fire, landuse, and harvest flux, positive for sink" ; -# NDEPLOY:long_name = "total N deployed in new growth" ; -# NDEP_TO_SMINN:long_name = "atmospheric N deposition to soil mineral N" ; -# NEE:long_name = "net ecosystem exchange of carbon, includes fire, landuse, harvest, and hrv_xsmrpool flux, positive for source" ; -# NEP:long_name = "net ecosystem production, excludes fire, landuse, and harvest flux, positive for sink" ; -# NET_NMIN:long_name = "net rate of N mineralization" ; -# NFIRE:long_name = "fire counts valid only in Reg.C" ; -# NFIX_TO_SMINN:long_name = "symbiotic/asymbiotic N fixation to soil mineral N" ; -# NPP:long_name = "net primary production" ; -# OCDEP:long_name = "total 
OC deposition (dry+wet) from atmosphere" ; -# O_SCALAR:long_name = "fraction by which decomposition is reduced due to anoxia" ; -# PARVEGLN:long_name = "absorbed par by vegetation at local noon" ; -# PBOT:long_name = "atmospheric pressure" ; -# PCO2:long_name = "atmospheric partial pressure of CO2" ; -# PCT_LANDUNIT:long_name = "% of each landunit on grid cell" ; -# PCT_NAT_PFT:long_name = "% of each PFT on the natural vegetation (i.e., soil) landunit" ; -# PFT_CTRUNC:long_name = "patch-level sink for C truncation" ; -# PFT_FIRE_CLOSS:long_name = "total patch-level fire C loss for non-peat fires outside land-type converted region" ; -# PFT_FIRE_NLOSS:long_name = "total pft-level fire N loss" ; -# PFT_NTRUNC:long_name = "pft-level sink for N truncation" ; -# PLANT_NDEMAND:long_name = "N flux required to support initial GPP" ; -# POTENTIAL_IMMOB:long_name = "potential N immobilization" ; -# PROD100C:long_name = "100-yr wood product C" ; -# PROD100C_LOSS:long_name = "loss from 100-yr wood product pool" ; -# PROD100N:long_name = "100-yr wood product N" ; -# PROD100N_LOSS:long_name = "loss from 100-yr wood product pool" ; -# PROD10C:long_name = "10-yr wood product C" ; -# PROD10C_LOSS:long_name = "loss from 10-yr wood product pool" ; -# PROD10N:long_name = "10-yr wood product N" ; -# PROD10N_LOSS:long_name = "loss from 10-yr wood product pool" ; -# PRODUCT_CLOSS:long_name = "total carbon loss from wood product pools" ; -# PRODUCT_NLOSS:long_name = "total N loss from wood product pools" ; -# PSNSHA:long_name = "shaded leaf photosynthesis" ; -# PSNSHADE_TO_CPOOL:long_name = "C fixation from shaded canopy" ; -# PSNSUN:long_name = "sunlit leaf photosynthesis" ; -# PSNSUN_TO_CPOOL:long_name = "C fixation from sunlit canopy" ; -# Q2M:long_name = "2m specific humidity" ; -# QBOT:long_name = "atmospheric specific humidity" ; -# QDRAI:long_name = "sub-surface drainage" ; -# QDRAI_PERCH:long_name = "perched wt drainage" ; -# QDRAI_XS:long_name = "saturation excess drainage" ; -# QDRIP:long_name = "throughfall" ; -# QFLOOD:long_name = "runoff from river flooding" ; -# QFLX_ICE_DYNBAL:long_name = "ice dynamic land cover change conversion runoff flux" ; -# QFLX_LIQ_DYNBAL:long_name = "liq dynamic land cover change conversion runoff flux" ; -# QH2OSFC:long_name = "surface water runoff" ; -# QINFL:long_name = "infiltration" ; -# QINTR:long_name = "interception" ; -# QIRRIG:long_name = "water added through irrigation" ; -# QOVER:long_name = "surface runoff" ; -# QRGWL:long_name = "surface runoff at glaciers (liquid only), wetlands, lakes" ; -# QRUNOFF:long_name = "total liquid runoff (does not include QSNWCPICE)" ; -# QRUNOFF_NODYNLNDUSE:long_name = "total liquid runoff (does not include QSNWCPICE) not including correction for land use change" ; -# QRUNOFF_R:long_name = "Rural total runoff" ; -# QRUNOFF_U:long_name = "Urban total runoff" ; -# QSNOMELT:long_name = "snow melt" ; -# QSNWCPICE:long_name = "excess snowfall due to snow capping" ; -# QSNWCPICE_NODYNLNDUSE:long_name = "excess snowfall due to snow capping not including correction for land use change" ; -# QSOIL:long_name = "Ground evaporation (soil/snow evaporation + soil/snow sublimation - dew)" ; -# QVEGE:long_name = "canopy evaporation" ; -# QVEGT:long_name = "canopy transpiration" ; -# RAIN:long_name = "atmospheric rain" ; -# RETRANSN:long_name = "plant pool of retranslocated N" ; -# RETRANSN_TO_NPOOL:long_name = "deployment of retranslocated N" ; -# RH2M:long_name = "2m relative humidity" ; -# RH2M_R:long_name = "Rural 2m specific humidity" ; -# 
RH2M_U:long_name = "Urban 2m relative humidity" ; -# RR:long_name = "root respiration (fine root MR + total root GR)" ; -# RSCANOPY:long_name = "canopy resistance" ; -# SABG:long_name = "solar rad absorbed by ground" ; -# SABG_PEN:long_name = "Rural solar rad penetrating top soil or snow layer" ; -# SABV:long_name = "solar rad absorbed by veg" ; -# SEEDC:long_name = "pool for seeding new Patches" ; -# SEEDN:long_name = "pool for seeding new PFTs" ; -# SMINN:long_name = "soil mineral N" ; -# SMINN_LEACHED:long_name = "soil mineral N pool loss to leaching" ; -# SMINN_TO_DENIT_L1S1:long_name = "denitrification for decomp. of litter 1to SOIL1" ; -# SMINN_TO_DENIT_L2S2:long_name = "denitrification for decomp. of litter 2to SOIL2" ; -# SMINN_TO_DENIT_L3S3:long_name = "denitrification for decomp. of litter 3to SOIL3" ; -# SMINN_TO_DENIT_S1S2:long_name = "denitrification for decomp. of soil 1to SOIL2" ; -# SMINN_TO_DENIT_S2S3:long_name = "denitrification for decomp. of soil 2to SOIL3" ; -# SMINN_TO_DENIT_S3S4:long_name = "denitrification for decomp. of soil 3to SOIL4" ; -# SMINN_TO_DENIT_S4:long_name = "denitrification for decomp. of soil 4to atmosphe" ; -# SMINN_TO_NPOOL:long_name = "deployment of soil mineral N uptake" ; -# SMINN_TO_PLANT:long_name = "plant uptake of soil mineral N" ; -# SMINN_TO_SOIL1N_L1:long_name = "mineral N flux for decomp. of LITR1to SOIL1" ; -# SMINN_TO_SOIL2N_L2:long_name = "mineral N flux for decomp. of LITR2to SOIL2" ; -# SMINN_TO_SOIL2N_S1:long_name = "mineral N flux for decomp. of SOIL1to SOIL2" ; -# SMINN_TO_SOIL3N_L3:long_name = "mineral N flux for decomp. of LITR3to SOIL3" ; -# SMINN_TO_SOIL3N_S2:long_name = "mineral N flux for decomp. of SOIL2to SOIL3" ; -# SMINN_TO_SOIL4N_S3:long_name = "mineral N flux for decomp. of SOIL3to SOIL4" ; -# SNOBCMCL:long_name = "mass of BC in snow column" ; -# SNOBCMSL:long_name = "mass of BC in top snow layer" ; -# SNODSTMCL:long_name = "mass of dust in snow column" ; -# SNODSTMSL:long_name = "mass of dust in top snow layer" ; -# SNOINTABS:long_name = "Percent of incoming solar absorbed by lower snow layers" ; -# SNOOCMCL:long_name = "mass of OC in snow column" ; -# SNOOCMSL:long_name = "mass of OC in top snow layer" ; -# SNOW:long_name = "atmospheric snow" ; -# SNOWDP:long_name = "gridcell mean snow height" ; -# SNOWICE:long_name = "snow ice" ; -# SNOWLIQ:long_name = "snow liquid water" ; -# SNOW_DEPTH:long_name = "snow height of snow covered area" ; -# SNOW_SINKS:long_name = "snow sinks (liquid water)" ; -# SNOW_SOURCES:long_name = "snow sources (liquid water)" ; -# SOIL1C:long_name = "SOIL1 C" ; -# SOIL1C_TO_SOIL2C:long_name = "decomp. of soil 1 C to soil 2 C" ; -# SOIL1N:long_name = "SOIL1 N" ; -# SOIL1N_TNDNCY_VERT_TRANS:long_name = "soil 1 N tendency due to vertical transport" ; -# SOIL1N_TO_SOIL2N:long_name = "decomp. of soil 1 N to soil 2 N" ; -# SOIL1_HR:long_name = "Het. Resp. from soil 1" ; -# SOIL2C:long_name = "SOIL2 C" ; -# SOIL2C_TO_SOIL3C:long_name = "decomp. of soil 2 C to soil 3 C" ; -# SOIL2N:long_name = "SOIL2 N" ; -# SOIL2N_TNDNCY_VERT_TRANS:long_name = "soil 2 N tendency due to vertical transport" ; -# SOIL2N_TO_SOIL3N:long_name = "decomp. of soil 2 N to soil 3 N" ; -# SOIL2_HR:long_name = "Het. Resp. from soil 2" ; -# SOIL3C:long_name = "SOIL3 C" ; -# SOIL3C_TO_SOIL4C:long_name = "decomp. of soil 3 C to soil 4 C" ; -# SOIL3N:long_name = "SOIL3 N" ; -# SOIL3N_TNDNCY_VERT_TRANS:long_name = "soil 3 N tendency due to vertical transport" ; -# SOIL3N_TO_SOIL4N:long_name = "decomp. 
of soil 3 N to soil 4 N" ; -# SOIL3_HR:long_name = "Het. Resp. from soil 3" ; -# SOIL4C:long_name = "SOIL4 C" ; -# SOIL4N:long_name = "SOIL4 N" ; -# SOIL4N_TNDNCY_VERT_TRANS:long_name = "soil 4 N tendency due to vertical transport" ; -# SOIL4N_TO_SMINN:long_name = "mineral N flux for decomp. of SOIL4" ; -# SOIL4_HR:long_name = "Het. Resp. from soil 4" ; -# SOILC:long_name = "soil C" ; -# SOILC_HR:long_name = "soil C heterotrophic respiration" ; -# SOILC_LOSS:long_name = "soil C loss" ; -# SOILPSI:long_name = "soil water potential in each soil layer" ; -# SOMC_FIRE:long_name = "C loss due to peat burning" ; -# SOMHR:long_name = "soil organic matter heterotrophic respiration" ; -# SOM_C_LEACHED:long_name = "total flux of C from SOM pools due to leaching" ; -# SR:long_name = "total soil respiration (HR + root resp)" ; -# STORVEGC:long_name = "stored vegetation carbon, excluding cpool" ; -# STORVEGN:long_name = "stored vegetation nitrogen" ; -# SUPPLEMENT_TO_SMINN:long_name = "supplemental N supply" ; -# SoilAlpha:long_name = "factor limiting ground evap" ; -# SoilAlpha_U:long_name = "urban factor limiting ground evap" ; -# TAUX:long_name = "zonal surface stress" ; -# TAUY:long_name = "meridional surface stress" ; -# TBOT:long_name = "atmospheric air temperature" ; -# TBUILD:long_name = "internal urban building temperature" ; -# TG:long_name = "ground temperature" ; -# TG_R:long_name = "Rural ground temperature" ; -# TG_U:long_name = "Urban ground temperature" ; -# TH2OSFC:long_name = "surface water temperature" ; -# THBOT:long_name = "atmospheric air potential temperature" ; -# TKE1:long_name = "top lake level eddy thermal conductivity" ; -# TLAI:long_name = "total projected leaf area index" ; -# TLAKE:long_name = "lake temperature" ; -# TOTCOLC:long_name = "total column carbon, incl veg and cpool" ; -# TOTCOLN:long_name = "total column-level N" ; -# TOTECOSYSC:long_name = "total ecosystem carbon, incl veg but excl cpool" ; -# TOTECOSYSN:long_name = "total ecosystem N" ; -# TOTLITC:long_name = "total litter carbon" ; -# TOTLITN:long_name = "total litter N" ; -# TOTPFTC:long_name = "total patch-level carbon, including cpool" ; -# TOTPFTN:long_name = "total PFT-level nitrogen" ; -# TOTPRODC:long_name = "total wood product C" ; -# TOTPRODN:long_name = "total wood product N" ; -# TOTSOMC:long_name = "total soil organic matter carbon" ; -# TOTSOMN:long_name = "total soil organic matter N" ; -# TOTVEGC:long_name = "total vegetation carbon, excluding cpool" ; -# TOTVEGN:long_name = "total vegetation nitrogen" ; -# TREFMNAV:long_name = "daily minimum of average 2-m temperature" ; -# TREFMNAV_R:long_name = "Rural daily minimum of average 2-m temperature" ; -# TREFMNAV_U:long_name = "Urban daily minimum of average 2-m temperature" ; -# TREFMXAV:long_name = "daily maximum of average 2-m temperature" ; -# TREFMXAV_R:long_name = "Rural daily maximum of average 2-m temperature" ; -# TREFMXAV_U:long_name = "Urban daily maximum of average 2-m temperature" ; -# TSA:long_name = "2m air temperature" ; -# TSAI:long_name = "total projected stem area index" ; -# TSA_R:long_name = "Rural 2m air temperature" ; -# TSA_U:long_name = "Urban 2m air temperature" ; -# TSOI_10CM:long_name = "soil temperature in top 10cm of soil" ; -# TV:long_name = "vegetation temperature" ; -# TWS:long_name = "total water storage" ; -# T_SCALAR:long_name = "temperature inhibition of decomposition" ; -# U10:long_name = "10-m wind" ; -# URBAN_AC:long_name = "urban air conditioning flux" ; -# URBAN_HEAT:long_name = "urban heating flux" ; -# 
VOCFLXT:long_name = "total VOC flux into atmosphere" ; -# VOLR:long_name = "river channel water storage" ; -# WASTEHEAT:long_name = "sensible heat flux from heating/cooling sources of urban waste heat" ; -# WF:long_name = "soil water as frac. of whc for top 0.05 m" ; -# WIND:long_name = "atmospheric wind velocity magnitude" ; -# WOODC:long_name = "wood C" ; -# WOODC_ALLOC:long_name = "wood C eallocation" ; -# WOODC_LOSS:long_name = "wood C loss" ; -# WOOD_HARVESTC:long_name = "wood harvest carbon (to product pools)" ; -# WOOD_HARVESTN:long_name = "wood harvest N (to product pools)" ; -# W_SCALAR:long_name = "Moisture (dryness) inhibition of decomposition" -## ==================================================================================================# -## EOF + if(out$dimm[[i]]==4){ # xypt + ncdf4::ncvar_put(ncout, out$var[[i]], out$dat[[i]], start=c(1,1,1,mo), count=c(1,1,12,1)) + }else if (out$dimm[[i]]==3) { # xyt + ncdf4::ncvar_put(ncout, out$var[[i]], out$dat[[i]], start=c(1,1,mo)) + }else{ # time_bounds + ncdf4::ncvar_put(ncout, out$var[[i]], out$dat[[i]], start=c(1,mo)) + } + } + } + } ## monthly convert variable into PEcAn format + } + ## extract variable and long names to VAR file for PEcAn vis + utils::write.table(sapply(ncout$var, function(x) { x$longname }), + file = paste0(oname, ".var"), + col.names = FALSE, + row.names = TRUE, + quote = FALSE) + try(ncdf4::nc_close(ncout)) ## end of year for loop +} ## model2netcdf.FATES \ No newline at end of file diff --git a/models/fates/R/recurse.create.R b/models/fates/R/recurse.create.R index 753c7f10f4d..c274f34a871 100644 --- a/models/fates/R/recurse.create.R +++ b/models/fates/R/recurse.create.R @@ -1,11 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2016 NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- ##' @name recurse.create ##' @title recurse.create diff --git a/models/fates/R/version.R b/models/fates/R/version.R new file mode 100644 index 00000000000..0e58d885272 --- /dev/null +++ b/models/fates/R/version.R @@ -0,0 +1,3 @@ +# Set at package install time, used by pecan.all::pecan_version() +# to identify development versions of packages +.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown") diff --git a/models/fates/R/write.configs.FATES.R b/models/fates/R/write.configs.FATES.R index 67b1ddf87ce..5b8fabb6a6f 100644 --- a/models/fates/R/write.configs.FATES.R +++ b/models/fates/R/write.configs.FATES.R @@ -1,13 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2016 NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -##-------------------------------------------------------------------------------------------------# ##' Writes config files for use with FATES. 
##'
 ##' @name write.config.FATES
@@ -19,7 +9,6 @@
 ##' @return none
 ##' @export
 ##' @author Mike Dietze, Shawn Serbin
-##-------------------------------------------------------------------------------------------------#
 write.config.FATES <- function(defaults, trait.values, settings, run.id){
 
   ## site information
diff --git a/models/fates/man/met2model.FATES.Rd b/models/fates/man/met2model.FATES.Rd
index 3899e5360ea..681e180d99e 100644
--- a/models/fates/man/met2model.FATES.Rd
+++ b/models/fates/man/met2model.FATES.Rd
@@ -25,15 +25,19 @@ met2model.FATES(
 
 \item{outfolder}{location on disk where outputs will be stored}
 
-\item{start_date}{the start date of the data to be downloaded (will only use the year part of the date)}
+\item{start_date}{the start date of the data to be downloaded}
 
-\item{end_date}{the end date of the data to be downloaded (will only use the year part of the date)}
+\item{end_date}{the end date of the data to be downloaded}
 
 \item{lst}{timezone offset to GMT in hours}
 
+\item{lat, lon}{latitude and longitude of site in decimal degrees}
+
 \item{overwrite}{should existing files be overwritten}
 
-\item{verbose}{should the function be very verbosefor(year in start_year:end_year)}
+\item{verbose}{should the function be very verbose}
+
+\item{...}{additional arguments, currently ignored}
 }
 \description{
 met2model wrapper for FATES
diff --git a/models/fates/man/model2netcdf.FATES.Rd b/models/fates/man/model2netcdf.FATES.Rd
index b1338622936..2fafb91db21 100644
--- a/models/fates/man/model2netcdf.FATES.Rd
+++ b/models/fates/man/model2netcdf.FATES.Rd
@@ -4,10 +4,30 @@
 \alias{model2netcdf.FATES}
 \title{Code to convert FATES netcdf output into CF standard}
 \usage{
-model2netcdf.FATES(outdir)
+model2netcdf.FATES(
+  outdir,
+  sitelat,
+  sitelon,
+  start_date,
+  end_date,
+  vars_names,
+  pfts
+)
 }
 \arguments{
-\item{outdir}{Location of FATES model output}
+\item{outdir}{Location of FATES model output (e.g. a path to a single ensemble output)}
+
+\item{sitelat}{Latitude of the site}
+
+\item{sitelon}{Longitude of the site}
+
+\item{start_date}{Start time of the simulation, not a string}
+
+\item{end_date}{End time of the simulation, not a string}
+
+\item{vars_names}{Names of selected variables in PEcAn format (e.g. c("GPP", "NPP"))}
+
+\item{pfts}{a named vector of PFT numbers where the names are PFT names}
 }
 \description{
 Code to convert FATES netcdf output into CF standard
@@ -16,7 +36,7 @@
 \dontrun{
 example.output <- system.file("case.clm2.h0.2004-01-01-00000.nc",package="PEcAn.FATES")
-model2netcdf.FATES(outdir="~/")
+model2netcdf.FATES(outdir="~/",sitelat, sitelon, start_date, end_date, vars_names, pfts)
 }
 }
diff --git a/models/fates/tests/Rcheck_reference.log b/models/fates/tests/Rcheck_reference.log
index 2d8774fc596..9a243925f81 100644
--- a/models/fates/tests/Rcheck_reference.log
+++ b/models/fates/tests/Rcheck_reference.log
@@ -90,15 +90,7 @@ Author field differs from that derived from Authors@R
 * checking Rd cross-references ... OK
 * checking for missing documentation entries ... OK
 * checking for code/documentation mismatches ... OK
-* checking Rd \usage sections ... WARNING
-Undocumented arguments in documentation object 'met2model.FATES'
  ‘lat’ ‘lon’ ‘...’
-
-Functions with \usage entries need to have the appropriate \alias
-entries, and all their arguments documented.
-The \usage entries must correspond to syntactically valid R code.
-See chapter ‘Writing R documentation files’ in the ‘Writing R -Extensions’ manual. +* checking Rd \usage sections ... OK * checking Rd contents ... OK * checking for unstated dependencies in examples ... OK * checking examples ... OK diff --git a/models/fates/tests/testthat.R b/models/fates/tests/testthat.R index 5dec6bcf474..508e1f9653f 100644 --- a/models/fates/tests/testthat.R +++ b/models/fates/tests/testthat.R @@ -1,11 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- library(testthat) library(PEcAn.utils) diff --git a/models/gday/DESCRIPTION b/models/gday/DESCRIPTION index d0ef5827bdc..3cda7b49439 100644 --- a/models/gday/DESCRIPTION +++ b/models/gday/DESCRIPTION @@ -1,8 +1,7 @@ Package: PEcAn.GDAY Type: Package Title: PEcAn Package for Integration of the GDAY Model -Version: 1.7.2 -Date: 2021-10-04 +Version: 1.7.3.9000 Authors@R: c(person("Martin", "De Kauwe", role = c("aut", "cre"), email = "mdekauwe@gmail.com"), person("Tony", "Gardella", role = c("aut"), @@ -27,4 +26,4 @@ Copyright: Authors LazyLoad: yes LazyData: TRUE Encoding: UTF-8 -RoxygenNote: 7.2.3 +RoxygenNote: 7.3.2 diff --git a/models/gday/LICENSE b/models/gday/LICENSE index 2a93336dfae..09ef35a60b4 100644 --- a/models/gday/LICENSE +++ b/models/gday/LICENSE @@ -1,33 +1,3 @@ -## This is the master copy of the PEcAn License - -University of Illinois/NCSA Open Source License - -Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - -PEcAn project -www.pecanproject.org - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal with the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -- Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. -- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. -- Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. 
+YEAR: 2024 +COPYRIGHT HOLDER: PEcAn Project +ORGANIZATION: PEcAn Project, authors affiliations diff --git a/models/gday/NEWS.md b/models/gday/NEWS.md new file mode 100644 index 00000000000..d071e4bce78 --- /dev/null +++ b/models/gday/NEWS.md @@ -0,0 +1,7 @@ +# PEcAn.GDAY 1.7.3.9000 + +## License change +* PEcAn.GDAY is now distributed under the BSD three-clause license instead of the NCSA Open Source license. + +## Added +* Added a `NEWS.md` file to track changes to the package. Prior to this point, changes were tracked in the main CHANGELOG for the PEcAn repository. diff --git a/models/gday/R/met2model.GDAY.R b/models/gday/R/met2model.GDAY.R index 0503f3a4823..c38338f2aba 100644 --- a/models/gday/R/met2model.GDAY.R +++ b/models/gday/R/met2model.GDAY.R @@ -1,12 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2015 Boston University, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - # R Code to convert NetCDF CF met files into GDAY met files ## If files already exist in 'Outfolder', the default function is NOT to overwrite them and only @@ -32,6 +23,7 @@ ##' the year part of the date) ##' @param overwrite should existing files be overwritten ##' @param verbose should the function be very verbose +##' @param ... additional arguments, currently ignored ##' @return generates GDAY formatted met file as a side effect, returns file metadata ##' that will be inserted into database ##' @author Martin De Kauwe, Tony Gardella diff --git a/models/gday/R/model2netcdf.GDAY.R b/models/gday/R/model2netcdf.GDAY.R index b4218899ae2..60abe96bf6f 100644 --- a/models/gday/R/model2netcdf.GDAY.R +++ b/models/gday/R/model2netcdf.GDAY.R @@ -1,13 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2015 Boston University, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -#------------------------------------------------------------------------------# ##' Convert GDAY output to netCDF ##' ##' Converts all output contained in a folder to netCDF. diff --git a/models/gday/R/version.R b/models/gday/R/version.R new file mode 100644 index 00000000000..0e58d885272 --- /dev/null +++ b/models/gday/R/version.R @@ -0,0 +1,3 @@ +# Set at package install time, used by pecan.all::pecan_version() +# to identify development versions of packages +.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown") diff --git a/models/gday/R/write.config.GDAY.R b/models/gday/R/write.config.GDAY.R index 824c240fe25..cd7267327c0 100644 --- a/models/gday/R/write.config.GDAY.R +++ b/models/gday/R/write.config.GDAY.R @@ -1,13 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2015 Boston University, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -##-------------------------------------------------------------------------------------------------# ##' Writes a config file for GDAY ##' ##' Requires a pft xml object, a list of trait values for a single model run, @@ -16,7 +6,7 @@ ##' @name write.config.GDAY ##' @title Write GDAY configuration files ##' @param defaults list of defaults to process -##' @param trait.samples vector of samples for a given trait +##' @param trait.values vector of samples for a given trait ##' @param settings list of settings from pecan settings file ##' @param run.id id of run ##' @return configuration file for GDAY for given run diff --git a/models/gday/man/met2model.GDAY.Rd b/models/gday/man/met2model.GDAY.Rd index 3d162c93932..5a180d85eb5 100644 --- a/models/gday/man/met2model.GDAY.Rd +++ b/models/gday/man/met2model.GDAY.Rd @@ -31,6 +31,8 @@ the year part of the date)} \item{overwrite}{should existing files be overwritten} \item{verbose}{should the function be very verbose} + +\item{...}{additional arguments, currently ignored} } \value{ generates GDAY formatted met file as a side effect, returns file metadata diff --git a/models/gday/man/write.config.GDAY.Rd b/models/gday/man/write.config.GDAY.Rd index a014a94e23e..eaec14b8082 100644 --- a/models/gday/man/write.config.GDAY.Rd +++ b/models/gday/man/write.config.GDAY.Rd @@ -9,11 +9,11 @@ write.config.GDAY(defaults, trait.values, settings, run.id) \arguments{ \item{defaults}{list of defaults to process} +\item{trait.values}{vector of samples for a given trait} + \item{settings}{list of settings from pecan settings file} \item{run.id}{id of run} - -\item{trait.samples}{vector of samples for a given trait} } \value{ configuration file for GDAY for given run diff --git a/models/gday/tests/Rcheck_reference.log b/models/gday/tests/Rcheck_reference.log index 38b4086117a..2b67a250978 100644 --- a/models/gday/tests/Rcheck_reference.log +++ b/models/gday/tests/Rcheck_reference.log @@ -100,20 +100,7 @@ Package in Depends field not imported from: ‘PEcAn.utils’ * checking Rd cross-references ... OK * checking for missing documentation entries ... OK * checking for code/documentation mismatches ... OK -* checking Rd \usage sections ... WARNING -Undocumented arguments in documentation object 'met2model.GDAY' - ‘...’ - -Undocumented arguments in documentation object 'write.config.GDAY' - ‘trait.values’ -Documented arguments not in \usage in documentation object 'write.config.GDAY': - ‘trait.samples’ - -Functions with \usage entries need to have the appropriate \alias -entries, and all their arguments documented. -The \usage entries must correspond to syntactically valid R code. -See chapter ‘Writing R documentation files’ in the ‘Writing R -Extensions’ manual. +* checking Rd \usage sections ... OK * checking Rd contents ... OK * checking for unstated dependencies in examples ... OK * checking examples ... NONE diff --git a/models/gday/tests/testthat.R b/models/gday/tests/testthat.R index 93d4460ba89..b2350127a7c 100644 --- a/models/gday/tests/testthat.R +++ b/models/gday/tests/testthat.R @@ -1,11 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved.
This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- library(testthat) library(PEcAn.utils) diff --git a/models/jules/DESCRIPTION b/models/jules/DESCRIPTION index ce365a95ce4..3e6210fee3e 100644 --- a/models/jules/DESCRIPTION +++ b/models/jules/DESCRIPTION @@ -1,13 +1,10 @@ Package: PEcAn.JULES Type: Package Title: PEcAn Package for Integration of the JULES Model -Version: 1.7.2 -Date: 2021-10-04 +Version: 1.7.3.9000 Authors@R: c(person("Mike", "Dietze", role = c("aut", "cre"), email = "dietze@bu.edu"), person("University of Illinois, NCSA", role = c("cph"))) -Author: Mike Dietze -Maintainer: Mike Dietze Description: This module provides functions to link the (JULES) to PEcAn. Imports: PEcAn.data.atmosphere, @@ -25,4 +22,4 @@ Copyright: Authors LazyLoad: yes LazyData: FALSE Encoding: UTF-8 -RoxygenNote: 7.2.3 +RoxygenNote: 7.3.2 diff --git a/models/jules/LICENSE b/models/jules/LICENSE index 5a9e44128f1..09ef35a60b4 100644 --- a/models/jules/LICENSE +++ b/models/jules/LICENSE @@ -1,34 +1,3 @@ -## This is the master copy of the PEcAn License - -University of Illinois/NCSA Open Source License - -Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - -PEcAn project -www.pecanproject.org - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal with the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -- Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. -- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. -- Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - +YEAR: 2024 +COPYRIGHT HOLDER: PEcAn Project +ORGANIZATION: PEcAn Project, authors affiliations diff --git a/models/jules/NEWS.md b/models/jules/NEWS.md new file mode 100644 index 00000000000..3b5ab1d8d20 --- /dev/null +++ b/models/jules/NEWS.md @@ -0,0 +1,10 @@ +# PEcAn.JULES 1.7.3.9000 + +## License change +* PEcAn.JULES is now distributed under the BSD three-clause license instead of the NCSA Open Source license. 
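The version.R files added throughout this PR all rely on the same mechanism; a minimal sketch of how the stamped hash behaves, assuming the build system exports PECAN_GIT_REV before installation (the revision string and the message below are illustrative, and pecan_version() internals are not shown in this diff):

Sys.setenv(PECAN_GIT_REV = "abc1234")                  # hypothetical revision set by the build
.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown")  # captured once at install time
if (.build_hash != "unknown") {
  message("development build at revision ", .build_hash)
}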
+ + +# PEcAn.JULES 1.7.1 + +* All changes in 1.7.1 and earlier were recorded in a single file for all of the PEcAn packages; please see +https://github.com/PecanProject/pecan/blob/v1.7.1/CHANGELOG.md for details. diff --git a/models/jules/R/model2netcdf.JULES.R b/models/jules/R/model2netcdf.JULES.R index 6270cb6da0c..733bef29e1b 100755 --- a/models/jules/R/model2netcdf.JULES.R +++ b/models/jules/R/model2netcdf.JULES.R @@ -1,13 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -##-------------------------------------------------------------------------------------------------# ##' Convert MODEL output into the PEcAn standard ##' ##' @name model2netcdf.JULES diff --git a/models/jules/R/version.R b/models/jules/R/version.R new file mode 100644 index 00000000000..0e58d885272 --- /dev/null +++ b/models/jules/R/version.R @@ -0,0 +1,3 @@ +# Set at package install time, used by pecan.all::pecan_version() +# to identify development versions of packages +.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown") diff --git a/models/jules/R/write.config.JULES.R b/models/jules/R/write.config.JULES.R index f68cce831ac..a4843cc7a29 100644 --- a/models/jules/R/write.config.JULES.R +++ b/models/jules/R/write.config.JULES.R @@ -1,22 +1,10 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -##-------------------------------------------------------------------------------------------------# ##' Writes a JULES config file. ##' ##' Requires a pft xml object, a list of trait values for a single model run, ##' and the name of the file to create ##' -##' @name write.config.JULES -##' @title Write JULES configuration files ##' @param defaults list of defaults to process -##' @param trait.samples vector of samples for a given trait +##' @param trait.values vector of samples for a given trait ##' @param settings list of settings from pecan settings file ##' @param run.id id of run ##' @return configuration file for JULES for given run @@ -591,14 +579,15 @@ write.config.JULES <- function(defaults, trait.values, settings, run.id) { #' Detect timestep of JULES met files #' -#' @param met.dir -#' @param met.regexp -#' @param start_date +#' @param met.dir path to directory containing met files +#' @param met.regexp pattern to match to find a met file in met.dir. +#' If more than one file matches, only the first will be used. +#' @param start_date Where in the file to start checking deltas. +#' Not used if timestep can be detected from the CDF header. #' #' @return a difftime object #' @export #' -#' @examples detect.timestep <- function(met.dir,met.regexp,start_date){ met.file <- dir(met.dir, pattern = met.regexp, full.names = TRUE)[1] PEcAn.logger::logger.info("Detect timestep:",met.dir,met.regexp) diff --git a/models/jules/man/detect.timestep.Rd b/models/jules/man/detect.timestep.Rd index 4ce5e36b155..05394f44327 100644 --- a/models/jules/man/detect.timestep.Rd +++ b/models/jules/man/detect.timestep.Rd @@ -7,7 +7,13 @@ detect.timestep(met.dir, met.regexp, start_date) } \arguments{ -\item{start_date}{} +\item{met.dir}{path to directory containing met files} + +\item{met.regexp}{pattern to match to find a met file in met.dir. +If more than one file matches, only the first will be used.} + +\item{start_date}{Where in the file to start checking deltas. +Not used if timestep can be detected from the CDF header.} } \value{ a difftime object diff --git a/models/jules/man/write.config.JULES.Rd b/models/jules/man/write.config.JULES.Rd index 57668954e59..379f67feb8c 100644 --- a/models/jules/man/write.config.JULES.Rd +++ b/models/jules/man/write.config.JULES.Rd @@ -2,26 +2,23 @@ % Please edit documentation in R/write.config.JULES.R \name{write.config.JULES} \alias{write.config.JULES} -\title{Write JULES configuration files} +\title{Writes a JULES config file.} \usage{ write.config.JULES(defaults, trait.values, settings, run.id) } \arguments{ \item{defaults}{list of defaults to process} +\item{trait.values}{vector of samples for a given trait} + \item{settings}{list of settings from pecan settings file} \item{run.id}{id of run} - -\item{trait.samples}{vector of samples for a given trait} } \value{ configuration file for JULES for given run } \description{ -Writes a JULES config file. -} -\details{ Requires a pft xml object, a list of trait values for a single model run, and the name of the file to create } diff --git a/models/jules/tests/Rcheck_reference.log b/models/jules/tests/Rcheck_reference.log index de6178b607e..9e6edfb8888 100644 --- a/models/jules/tests/Rcheck_reference.log +++ b/models/jules/tests/Rcheck_reference.log @@ -12,47 +12,9 @@ Maintainer: ‘Mike Dietze ’ New submission -License components with restrictions and base license permitting such: - BSD_3_clause + file LICENSE -File 'LICENSE': - ## This is the master copy of the PEcAn License - - University of Illinois/NCSA Open Source License - - Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - - PEcAn project - www.pecanproject.org - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal with the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. - - Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission.
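The documented behaviour of detect.timestep() can be pictured with a short sketch: read the met file's time axis and difference consecutive stamps, with start_date used only when the header alone is not conclusive. The file name and the assumption of a "time" variable in seconds are illustrative, not taken from this PR:

library(ncdf4)
nc   <- nc_open("jules_met_example.nc")     # hypothetical file matched by met.regexp
secs <- ncvar_get(nc, "time")               # assumes a time axis in seconds
nc_close(nc)
as.difftime(diff(secs)[1], units = "secs")  # a difftime object, as documented above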
- - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR - ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - Strong dependencies not in mainstream repositories: PEcAn.data.atmosphere, PEcAn.logger, PEcAn.remote, PEcAn.utils -The Date field is over a month old. * checking package namespace information ... OK * checking package dependencies ... OK * checking if this is a source package ... OK @@ -66,11 +28,7 @@ The Date field is over a month old. * checking installed package size ... OK * checking package directory ... OK * checking for future file timestamps ... OK -* checking DESCRIPTION meta-information ... NOTE -Author field differs from that derived from Authors@R - Author: ‘Mike Dietze’ - Authors@R: ‘Mike Dietze [aut, cre], University of Illinois, NCSA [cph]’ - +* checking DESCRIPTION meta-information ... OK * checking top-level files ... OK * checking for left-over files ... OK * checking index information ... OK @@ -95,24 +53,8 @@ Author field differs from that derived from Authors@R * checking Rd cross-references ... OK * checking for missing documentation entries ... OK * checking for code/documentation mismatches ... OK -* checking Rd \usage sections ... WARNING -Undocumented arguments in documentation object 'detect.timestep' - ‘met.dir’ ‘met.regexp’ - -Undocumented arguments in documentation object 'write.config.JULES' - ‘trait.values’ -Documented arguments not in \usage in documentation object 'write.config.JULES': - ‘trait.samples’ - -Functions with \usage entries need to have the appropriate \alias -entries, and all their arguments documented. -The \usage entries must correspond to syntactically valid R code. -See chapter ‘Writing R documentation files’ in the ‘Writing R -Extensions’ manual. -* checking Rd contents ... WARNING -Argument items with no description in Rd object 'detect.timestep': - ‘start_date’ - +* checking Rd \usage sections ... OK +* checking Rd contents ... OK * checking for unstated dependencies in examples ... OK * checking examples ... OK * checking for unstated dependencies in ‘tests’ ... OK @@ -121,4 +63,4 @@ Argument items with no description in Rd object 'detect.timestep': * checking for detritus in the temp directory ... OK * DONE -Status: 3 WARNINGs, 1 NOTEs +Status: 0 WARNINGs, 0 NOTEs diff --git a/models/jules/tests/testthat.R b/models/jules/tests/testthat.R index 060471e6cbb..ba6e49d00b7 100644 --- a/models/jules/tests/testthat.R +++ b/models/jules/tests/testthat.R @@ -1,11 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- library(testthat) library(PEcAn.utils) diff --git a/models/ldndc/DESCRIPTION b/models/ldndc/DESCRIPTION index a6b59412583..4cb800e9a37 100644 --- a/models/ldndc/DESCRIPTION +++ b/models/ldndc/DESCRIPTION @@ -1,21 +1,20 @@ Package: PEcAn.LDNDC Type: Package Title: PEcAn package for integration of the LDNDC model -Version: 1.7.2 -Date: 2022-07-28 +Version: 1.0.0.9000 Authors@R: c(person("Henri", "Kajasilta", role = c("aut", "cre"), email = "henri.kajasilta@fmi.fi")) Description: This module provides functions to link the (LDNDC) to PEcAn. Imports: - PEcAn.DB, - PEcAn.logger, - PEcAn.utils (>= 1.4.8), dplyr, lubridate, ncdf4, - data.table, - PEcAn.remote, PEcAn.data.atmosphere, + PEcAn.data.land, + PEcAn.logger, + PEcAn.remote, + PEcAn.utils (>= 1.4.8), + readr, rlang Suggests: testthat (>= 1.0.2) @@ -26,4 +25,4 @@ Copyright: Authors LazyLoad: yes LazyData: FALSE Encoding: UTF-8 -RoxygenNote: 7.2.3 +RoxygenNote: 7.3.2 diff --git a/models/ldndc/LICENSE b/models/ldndc/LICENSE index 486918ebb2e..09ef35a60b4 100644 --- a/models/ldndc/LICENSE +++ b/models/ldndc/LICENSE @@ -1,3 +1,3 @@ -YEAR: 2022 +YEAR: 2024 COPYRIGHT HOLDER: PEcAn Project -ORGANIZATION: PEcAn Project, authors affiliations \ No newline at end of file +ORGANIZATION: PEcAn Project, authors affiliations diff --git a/models/ldndc/NEWS.md b/models/ldndc/NEWS.md new file mode 100644 index 00000000000..4d34a4bbfd6 --- /dev/null +++ b/models/ldndc/NEWS.md @@ -0,0 +1,8 @@ +# PEcAn.LDNDC 1.0.0.9000 + +## License change +* PEcAn.LDNDC is now distributed under the BSD three-clause license instead of the NCSA Open Source license. + +# PEcAn.LDNDC 1.0.0 + +First public release \ No newline at end of file diff --git a/models/ldndc/R/met2model.LDNDC.R b/models/ldndc/R/met2model.LDNDC.R index 0e01177f0b9..cdf29faf245 100644 --- a/models/ldndc/R/met2model.LDNDC.R +++ b/models/ldndc/R/met2model.LDNDC.R @@ -213,8 +213,8 @@ met2model.LDNDC <- function(in.path, in.prefix, outfolder, start_date, end_date, prefix_latitude <- paste0('\t latitude = "', lat, '"') prefix_longitude <- paste0('\t longitude = "', lon, '"') - data_prefix <- paste(#"%global", prefix_global, # global includes the global time, but this is already got - # from elsewhere and not necessary here. + data_prefix <- paste("%global", prefix_global, # global includes the global time, but this is already obtained + # from elsewhere and may not be necessary here.
"%climate", prefix_climate, "%attributes", prefix_latitude, prefix_longitude, "%data \n", sep = "\n") @@ -223,13 +223,14 @@ met2model.LDNDC <- function(in.path, in.prefix, outfolder, start_date, end_date, cat(data_prefix, file = file.path(outfolder, out.file)) # For the first year, keep col.names as TRUE - data.table::fwrite(x = data, file = file.path(outfolder, out.file), - sep = "\t", col.names = T, append = T) + readr::write_delim(x = data, file = file.path(outfolder, out.file), + delim = "\t", append = T, quote = "none") + }else{ - # For the other year, col.names are FALSE - data.table::fwrite(x = data, file = file.path(outfolder, out.file), - sep = "\t", col.names = F, append = T) + # For the other years, col.names are FALSE + readr::write_delim(x = data, file = file.path(outfolder, out.file), + delim = "\t", col_names = F, append = T) } @@ -251,7 +252,7 @@ met2model.LDNDC <- function(in.path, in.prefix, outfolder, start_date, end_date, # netcdf file's starting date and simulation's starting date and converting that # difference to seconds. Returns +1 index based on the matching seconds. start_index <- function(units, start_date, sec){ - timediff <-(PEcAn.utils::datetime2cf(start_date, units, tz = "UTC"))*86400 + timediff <-round((PEcAn.utils::datetime2cf(start_date, units, tz = "UTC"))*86400) if(timediff == 0){ return(1) }else{ @@ -262,6 +263,6 @@ start_index <- function(units, start_date, sec){ end_index <- function(units, start_date, end_date, sec, tstep){ #if(lubridate::year(start_date) == lubridate::year(end_date)){ - timediff <-(PEcAn.utils::datetime2cf(end_date, units, tz = "UTC")+1)*86400 + timediff <- round((PEcAn.utils::datetime2cf(end_date, units, tz = "UTC")+1)*86400) return(which(sec == (timediff-86400/tstep))) } diff --git a/models/ldndc/R/model2netcdf.LDNDC.R b/models/ldndc/R/model2netcdf.LDNDC.R index 574bb3e1509..2c9b5f89b1e 100644 --- a/models/ldndc/R/model2netcdf.LDNDC.R +++ b/models/ldndc/R/model2netcdf.LDNDC.R @@ -19,7 +19,6 @@ ##' @author Henri Kajasilta model2netcdf.LDNDC <- function(outdir, sitelat, sitelon, start_date, end_date, delete.raw = FALSE) { - # File path to Output directory wherein the raw model results are located output_dir <- file.path(outdir, "Output") @@ -33,40 +32,66 @@ model2netcdf.LDNDC <- function(outdir, sitelat, sitelon, start_date, end_date, d PEcAn.logger::logger.info("Files with sub-daily timesteps found: ", Subdailyfiles) # Physiology data: LAI, Photosynthesis rate - physiology <- subset(read.csv(paste(output_dir, "physiology-subdaily.txt", sep = "/"), header = T, sep = "\t"), - select = c('datetime', 'lai', 'dC_co2_upt.kgCm.2.')) - } else{ - PEcAn.logger::logger.info("Files with daily timesteps used") + physiology <- subset(read.csv(file.path(output_dir, "physiology-subdaily.txt"), header = T, sep = "\t"), + select = c("datetime", "species", "lai", "dC_co2_upt.kgCm.2.", "dC_maintenance_resp.kgCm.2.", + "dC_transport_resp.kgCm.2.", "dC_growth_resp.kgCm.2.", "DW_below.kgDWm.2.", "DW_above.kgDWm.2.")) - ## Ecosystem subset - ecosystem <- subset(read.csv("ecosystem-daily.txt", header = T, sep = "\t"), - select = c('datetime', 'dC_NEE.kgCha.1.', 'C_total.kgCha.1.')) - - - ## Physiology subset - physiology <- subset(read.csv("physiology-daily.txt", header = T, sep = "\t"), - select = c('datetime', 'lai')) + soilchemistry <- subset(read.csv(file.path(output_dir, "soilchemistry-subdaily.txt"), header = T, sep ="\t"), + select = c("datetime", "sC_co2_hetero.kgCm.2.")) + + # Soil moisture information + watercycle <- 
subset(read.csv(file.path(output_dir, "watercycle-subdaily.txt"), header = T, sep ="\t"), + select = c("datetime", "soilwater_10cm...", "soilwater_30cm...")) + + + # Harvest + harvest <- subset(read.csv(file.path(output_dir, "report-harvest.txt"), header = T, sep ="\t"), + select = c("datetime", "dC_fru_export.kgCha.1.", "dC_fol_export.kgCha.1.", "dC_frt_export.kgCha.1.", + "dC_lst_above_export.kgCha.1.", "dC_lst_below_export.kgCha.1.", "dC_dst_above_export.kgCha.1.", + "dC_dst_below_export.kgCha.1.", "dC_straw_export.kgCha.1.")) + harvest$total <- rowSums(harvest[,-1]) + harvest <- harvest[,c("datetime", "total")] + + # Cut + cut <- subset(read.csv(paste(output_dir, "report-cut.txt", sep = "/"), header = T, sep ="\t"), + select = c("datetime", "dC_fru_export.kgCha.1.", "dC_fol_export.kgCha.1.", "dC_dfol_export.kgCha.1.", + "dC_lst_export.kgCha.1.", "dC_dst_export.kgCha.1.", "dC_frt_export.kgCha.1.")) + + cut$total <- rowSums(cut[,-1]) + cut <- cut[,c("datetime", "total")] + + } else{ + PEcAn.logger::logger.severe("Subdaily output files not found, check the configurations for the LDNDC runs") } + + # This approach should be more reliable than the previous one, since it keeps just one row per unique datetime; + # the last row will be the "all" aggregate if there are several species on the field physiology <- physiology[!duplicated(physiology$datetime, fromLast = T),] - # ldndc.out <- merge(ecosystem, physiology, by = "datetime", all = TRUE) %>% - # mutate(Date = format(as.POSIXlt(datetime, format = "%Y-%m-%d")), .keep = "unused") %>% - # mutate(Year = lubridate::year(Date), Day = strftime(Date, format = "%j"), - # Step = rep(0:(length(Date)/length(unique(Date))-1),length(unique(Date)))) %>% - # select(Year, Day, Step, dC_NEE.kgCha.1., C_total.kgCha.1., lai) + # Combine harvest and cut as one event + harvest <- rbind(harvest, cut) %>% dplyr::group_by(.data$datetime) %>% dplyr::summarise(harvest_carbon_flux = sum(.data$total)/10000) %>% + as.data.frame() # Temporary solution to silence the "no visible binding" note for the variables: 'Date', 'Year' and 'Day' - Date <- Year <- Day <- NULL + Date <- Year <- Day <- Step <- NULL + + ## Merge subdaily-files + ldndc.raw.out <- merge(physiology, soilchemistry, by = 'datetime', all = TRUE) + ldndc.raw.out <- merge(ldndc.raw.out, watercycle, by = 'datetime', all = TRUE) + ldndc.raw.out <- merge(ldndc.raw.out, harvest, by = 'datetime', all = TRUE) - ldndc.out <- physiology %>% - dplyr:: mutate(Date = format(as.POSIXlt(.data$datetime, format = "%Y-%m-%d")), .keep = "unused") %>% - dplyr::slice(1:(dplyr::n()-1)) %>% # Removing one extra observation + ldndc.out <- ldndc.raw.out %>% + dplyr:: mutate(Date = format(as.POSIXct(.data$datetime, format = "%Y-%m-%d")), .keep = "unused") %>% + dplyr::slice(1:(dplyr::n()-1)) %>% # Removing one extra line in output dplyr::mutate(Year = lubridate::year(Date), Day = as.numeric(strftime(Date, format = "%j")), Step = rep(0:(length(which(Date %in% unique(Date)[1]))-1),len = length(Date))) %>% - dplyr::select('Year', 'Day', 'Step', 'lai', 'dC_co2_upt.kgCm.2.') - + dplyr::select("Year", "Day", "Step", "lai", "dC_maintenance_resp.kgCm.2.", "dC_transport_resp.kgCm.2.", + "dC_growth_resp.kgCm.2.", "dC_co2_upt.kgCm.2.", "sC_co2_hetero.kgCm.2.", + "DW_below.kgDWm.2.", "DW_above.kgDWm.2.", "soilwater_10cm...", + "soilwater_30cm...", "harvest_carbon_flux") @@ -88,10 +113,11 @@ model2netcdf.LDNDC <- function(outdir, sitelat, sitelon, start_date, end_date, d for(y in year_seq){ # if a file exists and overwrite is FALSE, then move on to the next - print(paste("---- Prosessing year: ", y)) # debugging + print(paste("---- Processing year: ", y)) # Subset data for processing - sub.ldndnc.out <- subset(ldndc.out, Year == y) + sub.ldndc.out <- subset(ldndc.out, Year == y) + @@ -109,28 +135,54 @@ model2netcdf.LDNDC <- function(outdir, sitelat, sitelon, start_date, end_date, d } ## Subset the years we are interested in - sub.ldndnc.out <- subset(sub.ldndnc.out, Day >= begin_date & Day <= end_d) - + sub.ldndc.out <- subset(sub.ldndc.out, Day >= begin_date & Day <= end_d) + # Create the tvals that are used in nc-files - tvals <- sub.ldndnc.out[["Day"]] + sub.ldndnc.out[["Step"]] /out_day -1 - - + tvals <- sub.ldndc.out[["Day"]] + sub.ldndc.out[["Step"]] /out_day -1 ## Outputs need to be in appropriate units; this can be done here output <- list() - # NEE value is on kg, so change it mg (*1 000 000), then change ha to m2 (/10 000) and then day to seconds (86400) - #output[[1]] <- sub.ldndnc.out$dC_NEE.kgCha.1. * 100 / timestep.s - - # Kilogram of total soil carbon in a m2 - #output[[2]] <- sub.ldndnc.out$C_total.kgCha.1. / 10000 # LAI - output[[1]] <- sub.ldndnc.out$lai + output[[1]] <- ifelse(!is.na(sub.ldndc.out$lai), sub.ldndc.out$lai, 0) # Photosynthesis rate - GPP - output[[2]] <- sub.ldndnc.out$dC_co2_upt.kgCm.2. + GPP <- ifelse(!is.na(sub.ldndc.out$dC_co2_upt.kgCm.2.), sub.ldndc.out$dC_co2_upt.kgCm.2./timestep.s, 0) + output[[2]] <- GPP + + # Autotrophic respiration + Autotrophic <- ifelse(!is.na((sub.ldndc.out$dC_maintenance_resp.kgCm.2. + sub.ldndc.out$dC_transport_resp.kgCm.2. + sub.ldndc.out$dC_growth_resp.kgCm.2.)), + (sub.ldndc.out$dC_maintenance_resp.kgCm.2. + sub.ldndc.out$dC_transport_resp.kgCm.2. + sub.ldndc.out$dC_growth_resp.kgCm.2.)/timestep.s, 0) + output[[3]] <- Autotrophic + + # Heterotrophic respiration + Heterotrophic <- sub.ldndc.out$sC_co2_hetero.kgCm.2./timestep.s + output[[4]] <- Heterotrophic + + # Total respiration + output[[5]] <- Autotrophic + Heterotrophic + + # NPP + output[[6]] <- GPP - Autotrophic + + # NEE + output[[7]] <- ifelse(!is.na(Autotrophic), Autotrophic, 0) + Heterotrophic - GPP + + # Soil moisture at 10 cm and 30 cm + output[[8]] <- c(t(data.frame(sub.ldndc.out$soilwater_10cm..., sub.ldndc.out$soilwater_30cm...))) + + # Aboveground biomass + output[[9]] <- ifelse(!is.na(sub.ldndc.out$DW_above.kgDWm.2.), sub.ldndc.out$DW_above.kgDWm.2., 0)/timestep.s + + # Belowground biomass + # Using constant 0.45 to calculate the C from dry matter + output[[10]] <- ifelse(!is.na(sub.ldndc.out$DW_below.kgDWm.2.), sub.ldndc.out$DW_below.kgDWm.2., 0) * 0.45 / timestep.s + + harvest <- ifelse(!is.na(sub.ldndc.out$harvest_carbon_flux), sub.ldndc.out$harvest_carbon_flux, 0) * 0.45 / timestep.s + output[[11]] <- harvest + #### Declare netCDF variables #### t <- ncdf4::ncdim_def(name = "time", @@ -141,12 +193,18 @@
model2netcdf.LDNDC <- function(outdir, sitelat, sitelon, start_date, end_date, d ## Declare netCDF variables ## nc_var <- list() - #nc_var[[1]] <- PEcAn.utils::to_ncvar("NEE", dims) - #nc_var[[2]] <- PEcAn.utils::to_ncvar("TotSoilCarb", dims) + + # Subdaily values nc_var[[1]] <- PEcAn.utils::to_ncvar("LAI", dims) nc_var[[2]] <- PEcAn.utils::to_ncvar("GPP", dims) - + nc_var[[3]] <- PEcAn.utils::to_ncvar("AutoResp", dims) + nc_var[[4]] <- PEcAn.utils::to_ncvar("HeteroResp", dims) + nc_var[[5]] <- PEcAn.utils::to_ncvar("TotalResp", dims) + nc_var[[6]] <- PEcAn.utils::to_ncvar("NPP", dims) + nc_var[[7]] <- PEcAn.utils::to_ncvar("NEE", dims) + + # Soilwater + nc_var[[8]] <- PEcAn.utils::to_ncvar("SoilMoist", dims_added) + + # Biomass aboveground and belowground + nc_var[[9]] <- ncdf4::ncvar_def("AGB", units = "kg C m-2", dim = dims, missval = -999, + longname = "above ground biomass") + nc_var[[10]] <- ncdf4::ncvar_def("below_ground_carbon_content", units = "kg C m-2", dim = dims, missval = -999, + longname = "below ground biomass") + + nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("harvest_carbon_flux", units = "kg m-2", dim = dims, missval = -999, + longname = "biomass of harvested organs") + + # Daily values + # nc_var[[7]] <- PEcAn.utils::to_ncvar("LAI_Daily", dims_daily) ## Output netCDF data diff --git a/models/ldndc/R/version.R b/models/ldndc/R/version.R new file mode 100644 index 00000000000..0e58d885272 --- /dev/null +++ b/models/ldndc/R/version.R @@ -0,0 +1,3 @@ +# Set at package install time, used by pecan.all::pecan_version() +# to identify development versions of packages +.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown") diff --git a/models/ldndc/R/write.config.LDNDC.R b/models/ldndc/R/write.config.LDNDC.R index 13ae2ddda0c..6f13bd29ae2 100644 --- a/models/ldndc/R/write.config.LDNDC.R +++ b/models/ldndc/R/write.config.LDNDC.R @@ -18,12 +18,7 @@ write.config.LDNDC <- function(defaults, trait.values, settings, run.id) { - # To enforce minimum version to be used in simulations. - # Should not be necessary, but informs users who want to - # inspect input files closer (or do simulatons on different - # environment. Set based on current model version (1.33) - # and probably no reason to change. 
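SoilMoist above is written against dims_added, i.e. (lon, lat, depth, time) with depth = c(0.10, 0.30), and the c(t(data.frame(...))) idiom used for output[[8]] interleaves the two depth columns so that depth varies fastest, matching that dimension order. A tiny sketch with made-up values:

sw10 <- c(0.31, 0.32)         # hypothetical soil water at 10 cm for two timesteps
sw30 <- c(0.41, 0.42)         # hypothetical soil water at 30 cm
c(t(data.frame(sw10, sw30)))  # 0.31 0.41 0.32 0.42: depth varies fastest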
- MinPackReq <- "1.3" # Current version 1.33 + MinPackReq <- "1.35" # Current version 1.35 # Create Schedule time @@ -54,6 +49,18 @@ write.config.LDNDC <- function(defaults, trait.values, settings, run.id) { OutputPrefix <- file.path(outdir, "Output/") } + + # Add a groundwater file if one is available for the site + # (not an obligatory file for a model run) + if(!is.null(settings$run$inputs$groundwater$path1)){ + GroundWater = '' + groundwaterfile <- readLines(con = file.path(settings$run$inputs$groundwater$path1)) + writeLines(groundwaterfile, con = file.path(settings$rundir, run.id, "groundwater.txt")) + }else{GroundWater = ""} + + + + #----------------------------------------------------------------------- ## Fill .ldndc template with the given settings projectfile <- readLines(con = system.file("project.ldndc", package = "PEcAn.LDNDC"), n = -1) @@ -67,6 +74,8 @@ write.config.LDNDC <- function(defaults, trait.values, settings, run.id) { projectfile <- gsub('@SourcePrefix@', SourcePrefix, projectfile) projectfile <- gsub('@OutputPrefix@', OutputPrefix, projectfile) + projectfile <- gsub('@Groundwater@', GroundWater, projectfile) + # Write project file to rundir writeLines(projectfile, con = file.path(settings$rundir, run.id, "project.ldndc")) @@ -114,6 +123,7 @@ write.config.LDNDC <- function(defaults, trait.values, settings, run.id) { jobsh <- gsub("@RUNDIR@", rundir, jobsh) jobsh <- gsub("@METPATH@", MetPath, jobsh) + # LDNDC binaries on this server are located here. The binary also points to the model's own configurations. jobsh <- gsub("@BINARY@", paste(settings$model$binary, paste0(rundir, "/project.ldndc")), jobsh) if(is.null(settings$model$delete.raw)){ @@ -128,33 +138,144 @@ write.config.LDNDC <- function(defaults, trait.values, settings, run.id) { + ## ----- Preparing the setup file ----- ## + + ## Setup file -- This may differ based on the site properties and the ecosystem we are simulating + setupfile <- readLines(con = system.file("setup_template.xml", package = "PEcAn.LDNDC"), n = -1) + + ## Timemode + # Timemode currently supports only subdaily + timemode <- "subdaily" + setupfile <- gsub("@timemode@", timemode, setupfile) + + + ## Elevation, latitude and longitude + setupfile <- gsub("@elevation@", "10", setupfile) + setupfile <- gsub("@latitude@", settings$run$site$lat, setupfile) + setupfile <- gsub("@longitude@", settings$run$site$lon, setupfile) + + + ## Check the site id + site_id <- settings$run$site$id + + + + ## Handle the setups when working with grass, crop and forest fields + # Species may be hard-coded into the list; this differentiation is done only + # to separate the setups for forest and grassland/crops + # If more species are to be added, also update the part where PFT parameters are written into the speciesparameter file.
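The same placeholder-substitution pattern (readLines() a template, gsub() the @...@ markers, writeLines() the result) is used above for project.ldndc and job.sh and below for the setup file; a self-contained sketch with a hypothetical two-line template:

template <- c('<setup timemode="@timemode@">',    # hypothetical template content
              '  <site latitude="@latitude@" />')
template <- gsub("@timemode@", "subdaily", template)
template <- gsub("@latitude@", "61.84", template)
writeLines(template, file.path(tempdir(), "setup.xml"))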
+ # Available species for grass/crops: barley, oat, triticale, timothy, meadow and soil + # Available species for forest: pipy + pfts_grasscrops <- c("barley", "oat", "triticale", "timothy", "meadow", "soil") + pfts_forest <- c("pipy") + pfts_run <- NULL + for(pft_names in 1:length(settings$pfts)){ + pfts_run <- c(pfts_run, settings$pfts[[pft_names]]$name) + } + + + # Setup file created for grass and crop simulations: + if(all(pfts_run %in% pfts_grasscrops)){ + + ## Modules + # Microclimate module + setupfile <- gsub("@microclimate@", "canopyecm", setupfile) + + # Watercycle module and option + setupfile <- gsub("@watercycle@", "watercycledndc", setupfile) + setupfile <- gsub("@pevapotrans@", "penman", setupfile) + + # Airchemistry module + setupfile <- gsub("@airchemistry@", "airchemistrydndc", setupfile) + + # Physiology module + setupfile <- gsub("@physiology@", "plamox", setupfile) + setupfile <- gsub("@plantfamilies@", "crops grass", setupfile) + + # Soil modules and options + setupfile <- gsub("@soilchemistry@", "metrx", setupfile) + + # Report + setupfile <- gsub("@reportarable@", "", setupfile) + + # Write the populated setup file as an xml-file + writeLines(setupfile, con = file.path(settings$rundir, run.id, "setup.xml")) + + } + + # Setup file created for forest simulations + else if(all(pfts_run %in% pfts_forest)){ + + ## Modules + # Microclimate module + setupfile <- gsub("@microclimate@", "canopyecm", setupfile) + + # Watercycle module and option + setupfile <- gsub("@watercycle@", "echy", setupfile) + setupfile <- gsub("@pevapotrans@", "penman", setupfile) + + # Airchemistry module + setupfile <- gsub("@airchemistry@", "airchemistrydndc", setupfile) + + # Physiology module + setupfile <- gsub("@physiology@", "psim", setupfile) + + # Soil modules and options + setupfile <- gsub("@soilchemistry@", "metrx", setupfile) + + # Report + setupfile <- gsub("@reportarable@", "\n", setupfile) + + # Write the populated setup file as an xml-file + writeLines(setupfile, con = file.path(settings$rundir, run.id, "setup.xml")) + + + } + + # The given pfts were not among the supported species + else{ + PEcAn.logger::logger.severe("Given species are not currently supported. This can be fixed by updating the write.config.LDNDC.R file.") + } + + + ## ----- Fetching other site specific file templates ----- ## + + ### Event, site and airchemistry files ### + + # Fetch the event file from the given path; it might be modified if initial + # conditions are given. See the handling of initial conditions later on. + eventsfile <- readLines(con = file.path(settings$run$inputs$events$path1)) + + # Fetch the default site file. It will also be populated based on the given initial conditions + sitefile <- readLines(con = system.file("site_template.xml", package = "PEcAn.LDNDC"), n = -1) + + # Use the airchemistry file from the settings if available; the package default represents Finland + if(!is.null(settings$run$inputs$airchemistry$path1)){ + airchemistryfile <- readLines(con = file.path(settings$run$inputs$airchemistry$path1)) + } else{ + airchemistryfile <- readLines(con = system.file("airchemistry.txt", package = "PEcAn.LDNDC"), n = -1) + } + + #### write run-specific PFT parameters here #### Get parameters being handled by PEcAn + # For species, read the speciesparameters template speciesparfile <- readLines(con = system.file("speciesparameter_template.xml", package = "PEcAn.LDNDC"), n = -1) + # For site (parameters), read the siteparameters template + siteparfile <- readLines(con = system.file("siteparameters_template.xml", package = "PEcAn.LDNDC"), n = -1) + #---------------------- ## Set-up the necessary files into the run directory so ## the model is able to function properly. Later on, these ## files should be populated with initial values. - - - ###### THIS NEEDS TO BE FUNCTION AT SOME POINT ### PROBABLY SIMILAR FUNCTION FOR siteparameters as well - mnemonic_1 <- "__grasses__" group <- "grass" mnemonic_2 <- "perennialgrass" a.1 <- paste0(" \n") b.1 <- paste0("\t\t\t\t\t \n") + # Species and Siteparameters b.2 <- "" - # Keep old version as a reference this need to reconstruct at some point properly anyway #b.2 <- apply(trait.values[[1]], 1, function(x){paste0("\t\t\t\t\t\t \n")}) - b.3 <- paste0("\t\t\t\t \n") - a.2 <- paste0("\t\t\t") - + h.2 <- "" + species_par_values <- list() for (pft in seq_along(trait.values)) { @@ -163,99 +284,1837 @@ write.config.LDNDC <- function(defaults, trait.values, settings, run.id) { # The number at the beginning refers to the numbering of species parameters in the LDNDC guide book. + # NOTE: the LDNDC user guide has been updated since, so the numbering can be a little off compared + # to the latest version.
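Every numbered parameter block that follows has the same shape: if the BETY trait name is present in pft.names, one LDNDC <par> element is appended to b.2. A sketch of a single reconstructed block; the pft.traits lookup and the exact tag layout are assumptions inferred from the surrounding code, not verbatim from this PR:

if ("ndflush" %in% pft.names) {
  b.2 <- paste(b.2, paste0("\t\t\t\t<par name=\"NDFLUSH\" value=\"",
                           pft.traits[which(pft.names == "ndflush")],
                           "\" /> \n"), collapse = "")
}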
# First there is name in LDNDC and the second is name in BETY database + + #8 NDFLUSH - + if ("ndflush" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #9 NDMORTA - + if ("ndmorta" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #10 DLEAFSHED - + if ("dleafshed" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #12 AEJM J/mol - + if ("aejm" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #13 AEKC J/mol - + if ("aekc" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #14 AEKO J/mol - + if ("aeko" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #15 AERD J/mol - + if ("aerd" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #16 AEVC J/mol - + if ("aevc" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #17 AEVO J/mol - + if ("aevo" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + #18 ALB (-) - SW_albedo (-) if ("SW_albedo" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #21 AMAXA (-) - + if ("amaxa" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #22 AMAXB (-) - Amax (-) + if ("Amax" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #23 AMAXFRAC - + if ("amaxfrac" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #24 BASEFOLRESPFRAC - + if ("basefolrespfrac" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #25 CB - + if ("cb" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #26 CDAMP - + if ("cdamp" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #27 CL_P1 - + if ("cl_p1" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #28 CL_P2 - + if ("cl_p2" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #32 CELLULOSE - + if ("cellulose" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #34 CHILL_UNITS - + if ("chill_units" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #35 CHILL_TEMP_MAX - + if ("chill_temp_max" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #36 CT_IS - + if ("ct_is" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") } - #34 DIAMMAX (m) - stem_diameter (cm) + #37 CT_MT - + if ("ct_mt" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #38 DBRANCH kg/m3 - + if ("dbranch" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #39 DF_EXP - + if ("df_exp" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #40 DF_LIMIT m2/ha - + if ("df_limit" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #41 DFOL - leaf_density + if ("leaf_density" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #42 DFRTOPT - + if ("dfrtopt" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #43 DIAMMAX (m) - stem_diameter (cm) if ("stem_diameter" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t\t\t \n"), collapse="") + ),"' /> \n"), collapse="") + } + + #44 
DOC_RESP_RATIO - coarseRootExudation + if ("coarseRootExudation" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse="") + } + + #45 DRAGC - + if ("dragc" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse="") + } + + #46 DSAP - + if ("dsap" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse="") + } + + #47 DS_IS J/mol K - + if ("ds_is" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse="") + } + + #48 DS_MT J/mol K - + if ("ds_mt" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse="") + } + + #49 DVPD1 - + if ("dvpd1" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse="") + } + + #50 DVPD2 - + if ("dvpd2" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse="") + } + + #54 EF_OVOC ug/gDW h - + if ("ef_ovoc" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse="") + } + + #55 EXPL_NH4 - + if ("expl_nh4" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse="") + } + + #56 EXPL_NO3 - + if ("expl_no3" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse="") + } + + #57 EXP_ROOT_DISTRIBUTION - + if ("exp_root_distribution" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse="") } #58 EXT - extinction_coefficient_diffuse if ("extinction_coefficient_diffuse" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #59 FAGE - + if ("fage" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #62 FFACMAX - + if ("ffacmax" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #78 FOLRELGROMAX - + if ("folrelgromax" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") } #79 FRACTION_ROOT - root_biomass_fraction if ("root_biomass_fraction" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #80 FRACTION_FRUIT - + if ("fraction_fruit" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #81 FRACTION_FOLIAGE - + if ("fraction_foliage" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #82 FRET_N - + if ("fret_n" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #86 FRTALLOC_REL - + if ("frtalloc_rel" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #87 FRTLOSS_SCALE - + if ("frtloss_scale" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #88 FYIELD - + if ("fyield" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") } #89 GDD_BASE_TEMPERATURE (C) - gdd_tbase (C) if ("gdd_tbase" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") } - #167 PSNTMAX (C) - pstemp_max (C) - if ("pstemp_max" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t\t\t \n"), collapse="") + #90 GDD_MAX_TEMPERATURE - gdd_tmax + if ("gdd_tmax" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") } - #168 PSNTMIN (C) - pstemp_min (C) - if ("pstemp_min" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t\t\t \n"), collapse="") + #91 GDD_EMERGENCE - + if ("gdd_emergence" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") } - #169 PSNTOPT (C) - psnTOpt (C) - if 
("psnTOpt" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t\t\t \n"), collapse="") + #92 GDD_STEM_ELONGATION - + if ("gdd_stem_elongation" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") } - #193 SLAMAX (m2 kg-1) - SLAMAX (m2 kg-1) - if ("SLAMAX" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t\t\t \n"), collapse="") + #93 GDD_FLOWERING - + if ("gdd_flowering" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") } - #194 SLAMIN (m2 kg-1) - SLAMIN (m2 kg-1) - if ("SLAMIN" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t\t\t \n"), collapse="") + # #94 GDD_GRAIN_FILLING - GRAIN FILLING RELATIVE TO FLOWERING + # if ("gdd_grain_filling" %in% pft.names) { + # b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + # } + + #95 GDD_MATURITY - + if ("gdd_maturity" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") } - } - - speciesparfile <- gsub("@Info@", paste(a.1,b.1,b.2,b.3,a.2), speciesparfile) - - writeLines(speciesparfile, con = file.path(settings$rundir, run.id, "speciesparameters.xml")) - - - - # Default events file, need to change later on. Only takes care of some initial biomass and - # then some random events. Not made for real use, but testing. - eventsfile <- readLines(con = system.file("events_template.xml", package = "PEcAn.LDNDC"), n = -1) - - eventsfile <- gsub("@Startdate@", as.Date(settings$run$start.date, format = "%Y/%m/%d"), eventsfile) - eventsfile <- gsub("@Event_1_Time@", as.Date(settings$run$start.date, format = "%Y/%m/%d") + sample(120:160, 1), eventsfile) - eventsfile <- gsub("@Event_2_Time@", as.Date(settings$run$start.date, format = "%Y/%m/%d") + sample(170:180, 1), eventsfile) - eventsfile <- gsub("@Event_3_Time@", as.Date(settings$run$start.date, format = "%Y/%m/%d") + sample(250:300, 1), eventsfile) - - writeLines(eventsfile, con = file.path(settings$rundir, run.id, "events.xml")) - - - - # Default setup file, need to change later on - setupfile <- readLines(con = system.file("setup.xml", package = "PEcAn.LDNDC"), n = -1) - writeLines(setupfile, con = file.path(settings$rundir, run.id, "setup.xml")) - - - # Default site file, need to change later on - sitefile <- readLines(con = system.file("site.xml", package = "PEcAn.LDNDC"), n = -1) - writeLines(sitefile, con = file.path(settings$rundir, run.id, "site.xml")) - - # Default site file, need to change later on - siteparfile <- readLines(con = system.file("siteparameters.xml", package = "PEcAn.LDNDC"), n = -1) - writeLines(siteparfile, con = file.path(settings$rundir, run.id, "siteparameters.xml")) + #96 GDDFOLEND - + if ("gddfolend" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #97 GDDFOLSTART - + if ("gddfolstart" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #100 GGDPS_B (umol L-1 s-1) - + if ("ggdps_b" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #101 GSMAX (mmolH2O m-2 s-1) - + if ("gsmax" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #102 GSMIN (mmolH2O m-2 s-1) - + if ("gsmin" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #103 GZRTZ - + if ("gzrtz" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #104 H2OREF_A - + if ("h2oref_a" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #107 H2OREF_GS - + if ("h2oref_gs" %in% pft.names) 
{ + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #109 HALFSAT - + if ("halfsat" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #110 HA_IS (J mol-1) - + if ("ha_is" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #111 HA_MT (J mol-1) - + if ("ha_mt" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #112 HD_IS (J mol-1) - + if ("hd_is" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #113 HDJ - + if ("hdj" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #114 HD_EXP - + if ("hd_exp" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #115 HD_MAX (m m-1) - + if ("hd_max" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #116 HD_MIN (m m-1) - + if ("hd_min" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #117 HD_MT (J mol-1) - + if ("hd_mt" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #118 HREF - + if ("href" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #119 INI_N_FIX - + if ("ini_n_fix" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #120 KC25 (mmol mol-1 mbar-1)- + if ("kc25" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #121 KM20 - + if ("km20" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #126 K_MM_NITROGEN_UPTAKE - + if ("k_mm_nitrogen_uptake" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #127 KO25 - + if ("ko25" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #128 KRC_WOOD - + if ("krc_wood" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #129 LIGNIN - + if ("lignin" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #130 MAINTENANCE_TEMP_REF - + if ("maintenance_temp_ref" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #131 MC_LEAF - + if ("mc_leaf" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #132 MC_STEM - + if ("mc_stem" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #133 MC_ROOT - + if ("mc_root" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #134 MC_STORAGE - + if ("mc_storage" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #135 MFOLOPT - + if ("mfolopt" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #136 M_FRUIT_OPT - + if ("m_fruit_opt" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #139 MUE_IS (s-1) - + if ("mue_is" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #140 MUE_MT (s-1) - + if ("mue_mt" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #141 MWFM - + if ("mwfm" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #143 NC_FOLIAGE_MIN - + if ("nc_foliage_min" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #144 NC_FOLIAGE_MAX - + if ("nc_foliage_max" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #145 NCFOLOPT (kg kg-1) - + if ("ncfolopt" %in% pft.names) { + b.2 <- paste(b.2, 
paste0("\t\t\t\t \n"), collapse="") + } + + #146 NC_FINEROOTS_MAX - + if ("nc_fineroots_max" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #147 NC_FINEROOTS_MIN - + if ("nc_fineroots_min" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #148 NC_FRUIT_MAX - + if ("nc_fruit_max" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #149 NC_FRUIT_MIN - + if ("nc_fruit_min" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #150 NC_STRUCTURAL_TISSUE_MAX - + if ("nc_structural_tissue_max" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #151 NC_STRUCTURAL_TISSUE_MIN - + if ("nc_structural_tissue_min" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #152 NCSAPOPT (kg kg-1) - + if ("ncsapopt" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #153 N_DEF_FACTOR - + if ("n_def_factor" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #154 N_DEMAND_VEG - + if ("n_demand_veg" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #155 N_DEMAND_REPROD - + if ("n_demand_reprod" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #156 NFIX_CEFF - + if ("nfix_ceff" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #157 NFIX_TMAX - + if ("nfix_tmax" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #158 NFIX_TOPT - + if ("nfix_topt" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #159 NFIX_TMIN - + if ("nfix_tmin" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #160 NFIX_W - + if ("nfix_w" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #161 NFIX_RATE (kg N kg-1 DM-1 d-1) - + if ("nfix_rate" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #163 PEXS - + if ("pexs" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #164 PFL - + if ("pfl" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #165 PSL - + if ("psl" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #167 PSNTMAX (C) - pstemp_max (C) + if ("pstemp_max" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #168 PSNTMIN (C) - pstemp_min (C) + if ("pstemp_min" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #169 PSNTOPT (C) - psnTOpt (C) + if ("psnTOpt" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #170 QHRD - + if ("qhrd" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #171 QJVC - + if ("qjvc" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #172 QRD25 (umol m-2 s-1) - + if ("qrd25" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #173 QRF - + if ("qrf" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #174 QSF_P1 (m2 cm-2) - + if ("qsf_p1" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #175 QSF_P2 (m2 cm-2) - + if ("qsf_p2" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #176 QVOVC - + if ("qvovc" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), 
collapse="") + } + + #177 QWODFOLMIN - + if ("qwodfolmin" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #178 RBUDDEM - + if ("rbuddem" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + + #179 RESP - resp + if ("resp" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #180 RESPQ10 - leaf_respiration_Q10 + if ("leaf_respiration_Q10" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #181 ROOTMRESPFRAC - + if ("rootmrespfrac" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #182 RS_CONDUCT - + if ("rs_conduct" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #183 SCALE_I - + if ("scale_i" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #184 SCALE_M - + if ("scale_m" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #185 SDJ - + if ("sdj" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #186 SENESCENCE_AGE - + if ("senescence_age" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #187 SENESCENCE_DROUGHT - + if ("senescence_drought" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") # In order to have zeros + } + + #188 SENESCENCE_FROST - + if ("senescence_frost" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #?? SENESCSTART - + if ("senescstart" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #192 SHOOT_STIMULATION_REPROD - + if ("shoot_stimulation_reprod" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #193 SLAMAX (m2 kg-1) - SLAMAX (m2 kg-1) + if ("SLAMAX" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #194 SLAMIN (m2 kg-1) - SLAMIN (m2 kg-1) + if ("SLAMIN" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #195 SLADECLINE - + if ("sladecline" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #196 SLOPE_GSA - + if ("slope_gsa" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #197 SLOPE_GSCO2 - + if ("slope_gsco2" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #198 SLOPE_GSH2O - + if ("slope_gsh2o" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #199 SLOPE_NC - + if ("slope_nc" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #200 TAP_P1 - + if ("tap_p1" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #201 TAP_P2 - + if ("tap_p2" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #202 TAP_P3 - + if ("tap_p3" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #203 TAU - + if ("tau" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #204 THETA - + if ("theta" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #205 TLIMIT - + if ("tlimit" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #206 TOFRTBAS - + if ("tofrtbas" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #207 TOSAPMAX - + if ("tosapmax" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #208 UCMAX (kgN m-2 
leaf area) - + if ("ucmax" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #210 US_NH4 (kgN kg-1 fine root dry weight day-1) - + if ("us_nh4" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #211 US_NH4MYC (kgN kg-1 fine root dry weight day-1) - + if ("us_nh4myc" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #213 US_NO3 (kgN kg-1 fine root dry weight day-1) - + if ("us_no3" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #214 US_NO3MYC (kgN kg-1 fine root dry weight day-1) - + if ("us_no3myc" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #215 VCFACT - + if ("vcfact" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #216 VCMAX25 - + if ("vcmax25" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #217 VPDREF (kPa) - + if ("vpdref" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #219 WOODMRESPA - + if ("woodmrespa" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #220 WUECMAX - wuecmax + if ("wuecmax" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #221 WUECMIN - wuecmin + if ("wuecmin" %in% pft.names) { # CHECK THAT THE VALUE IS NOT OVER MAX + wuecmin_val <- ifelse(pft.traits[which(pft.names == "wuecmin")] > pft.traits[which(pft.names == "wuecmax")], pft.traits[which(pft.names == "wuecmax")], pft.traits[which(pft.names == "wuecmin")]) + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #222 ZRTMC - + if ("zrtmc" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + #??? HEIGHT_MAX - + if ("height_max" %in% pft.names) { + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + } + + ## SITEPARAMETERS + # Number at the beginning refers to the number of site parameters in LDNDC guide book. 
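In this rendering of the diff the XML payload inside each paste0() call has been stripped (everything between angle brackets was lost), so the per-trait blocks above appear to paste only whitespace. A minimal sketch of the pattern they likely follow, assuming LDNDC's par-element shape; write_par() and the example trait values are hypothetical, not part of the PR:

# Hypothetical helper mirroring the repeated if-blocks above; the element
# name/attribute layout is an assumption, since the actual XML was stripped
# from this rendering of the diff.
write_par <- function(block, trait, pft.names, pft.traits, indent = "\t\t\t\t") {
  if (trait %in% pft.names) {
    value <- pft.traits[which(pft.names == trait)]
    block <- paste(block,
                   paste0(indent, "<par name=\"", toupper(trait),
                          "\" value=\"", value, "\" />\n"),
                   collapse = "")
  }
  block
}

# Example inputs as they might arrive from the meta-analysis posteriors (assumed values)
pft.names  <- c("gdd_maturity", "vcmax25")
pft.traits <- c(1450, 60)
b.2 <- write_par("", "gdd_maturity", pft.names, pft.traits)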
+ + + #58 EVALIM + if ("evalim" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #82 GROUNDWATER_NUTRIENT_RENEWAL - + if ("groundwater_nutrient_renewal" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #121 METRX_AMAX - + if ("metrx_amax" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #122 METRX_AMAX_ALGAE - + if ("metrx_amax_algae" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #123 METRX_BETA_LITTER_TYPE - + if ("metrx_beta_litter_type" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #124 METRX_BIOSYNTH_EFF - + if ("metrx_biosynth_eff" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #125 METRX_CN_ALGAE - + if ("metrx_cn_algae" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #127 METRX_CN_MIC_MAX - + if ("metrx_cn_mic_max" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #128 METRX_CN_MIC_MIN - + if ("metrx_cn_mic_min" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #129 METRX_CO2_PROD_DECOMP - + if ("metrx_co2_prod_decomp" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #130 METRX_D_EFF_REDUCTION - + if ("metrx_d_eff_reduction" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #131 METRX_F_CHEMODENIT_PH_ONEILL_1 - + if ("metrx_f_chemodenit_ph_oneill_1" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #132 METRX_F_CHEMODENIT_PH_ONEILL_2 - + if ("metrx_f_chemodenit_ph_oneill_2" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #133 METRX_F_CHEMODENIT_PH_ONEILL_3 - + if ("metrx_f_chemodenit_ph_oneill_3" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #134 METRX_F_CHEMODENIT_T_EXP_1 - + if ("metrx_f_chemodenit_t_exp_1" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #135 METRX_F_CHEMODENIT_T_EXP_2 - + if ("metrx_f_chemodenit_t_exp_2" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #136 METRX_F_DECOMP_M_WEIBULL_1 - + if ("metrx_f_decomp_m_weibull_1" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #137 METRX_F_DECOMP_M_WEIBULL_2 - + if ("metrx_f_decomp_m_weibull_2" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #138 METRX_F_DECOMP_M_WEIBULL_3 - + if ("metrx_f_decomp_m_weibull_3" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #139 METRX_F_CH4_OXIDATION_T_EXP_1 - + if ("metrx_f_ch4_oxidation_t_exp_1" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #140 METRX_F_CH4_OXIDATION_T_EXP_2 - + if ("metrx_f_ch4_oxidation_t_exp_2" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #141 METRX_F_CH4_PRODUCTION_T_EXP_1 - + if ("metrx_f_ch4_production_t_exp_1" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #142 METRX_F_CH4_PRODUCTION_T_EXP_2 - + if ("metrx_f_ch4_production_t_exp_2" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #143 METRX_F_DECOMP_T_EXP_1 - + if ("metrx_f_decomp_t_exp_1" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #144 METRX_F_DECOMP_T_EXP_2 - + if ("metrx_f_decomp_t_exp_2" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #145 METRX_F_DECOMP_CLAY_1 - + if 
("metrx_f_decomp_clay_1" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #146 METRX_F_DECOMP_CLAY_2 - + if ("metrx_f_decomp_clay_2" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #147 METRX_F_DENIT_N2_MIN - + if ("metrx_f_denit_n2_min" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #148 METRX_F_DENIT_N2_MAX - + if ("metrx_f_denit_n2_max" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #149 METRX_F_DENIT_NO - + if ("metrx_f_denit_no" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #150 METRX_F_DENIT_PH_EXP_1 - + if ("metrx_f_denit_ph_exp_1" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #151 METRX_F_DENIT_PH_EXP_2 - + if ("metrx_f_denit_ph_exp_2" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #152 METRX_F_DENIT_M_WEIBULL_1 - + if ("metrx_f_denit_m_weibull_1" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #153 METRX_F_DENIT_M_WEIBULL_2 - + if ("metrx_f_denit_m_weibull_2" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #154 METRX_F_N_ALGAE - + if ("metrx_f_n_algae" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #155 METRX_F_N_CH4_OXIDATION - + if ("metrx_f_n_ch4_oxidation" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #156 METRX_F_NIT_NO_M_EXP_1 - + if ("metrx_f_nit_no_m_exp_1" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #157 METRX_F_NIT_NO_M_EXP_2 - + if ("metrx_f_nit_no_m_exp_2" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #158 METRX_F_NIT_NO_T_EXP_1 - + if ("metrx_f_nit_no_t_exp_1" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #159 METRX_F_NIT_NO_T_EXP_2 - + if ("metrx_f_nit_no_t_exp_2" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #160 METRX_F_NIT_NO_PH_LIN_1 - + if ("metrx_f_nit_no_ph_lin_1" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #161 METRX_F_NIT_NO_PH_LIN_2 - + if ("metrx_f_nit_no_ph_lin_2" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #162 METRX_F_NIT_N2O_M_WEIBULL_1 - + if ("metrx_f_nit_n2o_m_weibull_1" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #163 METRX_F_NIT_N2O_M_WEIBULL_2 - + if ("metrx_f_nit_n2o_m_weibull_2" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #164 METRX_F_NIT_N2O_M_WEIBULL_3 - + if ("metrx_f_nit_n2o_m_weibull_3" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #165 METRX_F_NIT_N2O_T_EXP_1 - + if ("metrx_f_nit_n2o_t_exp_1" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #166 METRX_F_NIT_N2O_T_EXP_2 - + if ("metrx_f_nit_n2o_t_exp_2" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #167 METRX_F_NIT_PH_ONEILL_1 - + if ("metrx_f_nit_ph_oneill_1" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #168 METRX_F_NIT_PH_ONEILL_2 - + if ("metrx_f_nit_ph_oneill_2" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #169 METRX_F_NIT_PH_ONEILL_3 - + if ("metrx_f_nit_ph_oneill_3" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #170 METRX_FE_REDUCTION - + if ("metrx_fe_reduction" %in% pft.names) { + h.2 <- paste(h.2, 
paste0("\t\t \n"), collapse="") + } + + #171 METRX_FRAC_FE_CH4_PROD - + if ("metrx_frac_fe_ch4_prod" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #172 METRX_MAX_DEPTH_DENIT - + if ("metrx_max_depth_denit" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #173 METRX_MIC_EFF - + if ("metrx_mic_eff" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #174 METRX_MIC_EFF_METANE_OX - + if ("metrx_mic_eff_metane_ox" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #175 METRX_MUEMAX_C_ALGAE - + if ("metrx_muemax_c_algae" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #176 METRX_MUEMAX_C_CH4_OX - + if ("metrx_muemax_c_ch4_ox" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #177 METRX_MUEMAX_C_CH4_PROD - + if ("metrx_muemax_c_ch4_prod" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #178 METRX_MUEMAX_C_DENIT - + if ("metrx_muemax_c_denit" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #179 METRX_MUEMAX_C_FERM - + if ("metrx_muemax_c_ferm" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #180 METRX_MUEMAX_C_NIT - + if ("metrx_muemax_c_nit" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #181 METRX_MUEMAX_C_FE_RED - + if ("metrx_muemax_c_fe_red" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #182 METRX_MUEMAX_H2_CH4_PROD - + if ("metrx_muemax_h2_ch4_prod" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #183 METRX_MUEMAX_N_ASSI - + if ("metrx_muemax_n_assi" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #184 METRX_NITRIFY_MAX - + if ("metrx_nitrify_max" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #185 METRX_KF_NIT_NO - + if ("metrx_kf_nit_no" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #186 METRX_KF_NIT_N2O - + if ("metrx_kf_nit_n2o" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #187 METRX_K_O2_CH4_PROD - + if ("metrx_k_o2_ch4_prod" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #188 METRX_K_O2_FE_RED - + if ("metrx_k_o2_fe_red" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #189 METRX_KF_FE_FE_RED - + if ("metrx_kf_fe_fe_red" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #190 METRX_KMM_AC_CH4_PROD - + if ("metrx_kmm_ac_ch4_prod" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #191 METRX_KMM_AC_FE_RED - + if ("metrx_kmm_ac_fe_red" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #192 METRX_KMM_H2_FE_RED - + if ("metrx_kmm_h2_fe_red" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #193 METRX_KMM_C_DENIT - + if ("metrx_kmm_c_denit" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #194 METRX_KMM_CH4_CH4_OX - + if ("metrx_kmm_ch4_ch4_ox" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #195 METRX_KMM_C_MIC - + if ("metrx_kmm_c_mic" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #196 METRX_KMM_O2_NIT - + if ("metrx_kmm_o2_nit" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #197 METRX_KMM_H2_FERM - + if ("metrx_kmm_h2_ferm" %in% 
pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #198 METRX_KMM_H2_CH4_PROD - + if ("metrx_kmm_h2_ch4_prod" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #201 METRX_KMM_N_DENIT - + if ("metrx_kmm_n_denit" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #202 METRX_KMM_N_MIC - + if ("metrx_kmm_n_mic" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #203 METRX_KMM_NH4_NIT - + if ("metrx_kmm_nh4_nit" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #204 METRX_KMM_NO2_NIT - + if ("metrx_kmm_no2_nit" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #205 METRX_KMM_O2_CH4_OX - + if ("metrx_kmm_o2_ch4_ox" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #206 METRX_KMM_FE_OX - + if ("metrx_kmm_fe_ox" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #207 METRX_KMM_PH_INCREASE_FROM_UREA - + if ("metrx_kmm_ph_increase_from_urea" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #208 METRX_KR_ANVF_DIFF_GAS - + if ("metrx_kr_anvf_diff_gas" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #209 METRX_KR_ANVF_DIFF_LIQ - + if ("metrx_kr_anvf_diff_liq" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #210 METRX_KR_DC_ALGAE - + if ("metrx_kr_dc_algae" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #211 METRX_KR_DC_AORG - + if ("metrx_kr_dc_aorg" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #212 METRX_KR_DC_CEL - + if ("metrx_kr_dc_cel" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #213 METRX_KR_DC_HUM_0 - + if ("metrx_kr_dc_hum_0" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #214 METRX_KR_DC_HUM_1 - + if ("metrx_kr_dc_hum_1" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #215 METRX_KR_DC_HUM_2 - + if ("metrx_kr_dc_hum_2" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #216 METRX_KR_DC_LIG - + if ("metrx_kr_dc_lig" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #217 METRX_KR_DC_RAW_LITTER - + if ("metrx_kr_dc_raw_litter" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #218 METRX_KR_DC_SOL - + if ("metrx_kr_dc_sol" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #219 METRX_KR_DC_WOOD - + if ("metrx_kr_dc_wood" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #220 METRX_KR_DENIT_CHEMO - + if ("metrx_kr_denit_chemo" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #221 METRX_KR_FRAC_FRAG_ABOVE - + if ("metrx_kr_frac_frag_above" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #223 METRX_KR_OX_FE - + if ("metrx_kr_ox_fe" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #224 METRX_KR_HU_AORG_HUM_0 - + if ("metrx_kr_hu_aorg_hum_0" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #225 METRX_KR_HU_AORG_HUM_1 - + if ("metrx_kr_hu_aorg_hum_1" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #227 METRX_KR_HU_SOL - + if ("metrx_kr_hu_sol" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #229 METRX_KR_HU_HUM_0 - + if ("metrx_kr_hu_hum_0" 
%in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #230 METRX_KR_HU_HUM_1 - + if ("metrx_kr_hu_hum_1" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #231 METRX_KR_HU_LIG - + if ("metrx_kr_hu_lig" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #233 METRX_KR_REDUCTION_CN - + if ("metrx_kr_reduction_cn" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #234 METRX_KR_REDUCTION_ANVF - + if ("metrx_kr_reduction_anvf" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #235 METRX_KR_REDUCTION_CONIFEROUS - + if ("metrx_kr_reduction_coniferous" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #236 METRX_LIG_HUMIFICATION - + if ("metrx_lig_humification" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #237 METRX_RET_HUMUS - + if ("metrx_ret_humus" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #238 METRX_RET_LITTER - + if ("metrx_ret_litter" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #239 METRX_RET_MICROBES - + if ("metrx_ret_microbes" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #240 METRX_TILL_STIMULATION_1 - + if ("metrx_till_stimulation_1" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #241 METRX_TILL_STIMULATION_2 - + if ("metrx_till_stimulation_2" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #242 METRX_V_EBULLITION - + if ("metrx_v_ebullition" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #303 RETDOC - + if ("retdoc" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #304 RETNO3 - + if ("retno3" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #317 TEXP - + if ("texp" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + + # SOILWATER RELATED + + #2 BY_PASSF - + if ("by_passf" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #69 FPERCOL - + if ("fpercol" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #80 FRUNOFF - + if ("frunoff" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #84 IMPEDANCE_PAR - + if ("impedance_par" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #307 ROOT_DEPENDENT_TRANS - + if ("root_dependent_trans" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + # 349 WCDNDC_EVALIM_FRAC_WCMIN + if ("wcdndc_evalim_frac_wcmin" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + #335 WCDNDC_INCREASE_POT_EVAPOTRANS - + if ("wcdndc_increase_pot_evapotrans" %in% pft.names) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + + + + + # Assigning pft values + species_par_values[names(trait.values)[pft]] <- b.2 + + b.2 <- "" + + + } + + + ## INITIAL SOIL CONDITIONS + soil_layer <- list() + + + ## Soil layers, if not external files are given + if(is.null(settings$run$inputs$poolinitcond$path)){ + + # Set different layers, which will be used based on the soil data that is available + # For example, if we have soil data for top layer, then that will be used instead of soil_layer_1 + soil_layer[1] <- '' + soil_layer[2] <- '' + soil_layer[3] <- '' + soil_layer[4] <- '' + soil_layer[5] <- '' + soil_layer[6] <- '' + soil_layer[7] <- '' + + 
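The per-PFT bookkeeping above stores each PFT's accumulated parameter string in species_par_values; further below those strings are wrapped in species elements and substituted for the template's @Info@ placeholder. A minimal sketch of that flow; the species element shape and the template string are assumptions (the wrapping XML was stripped from this rendering), and the parameter line is illustrative:

# Illustrative values only; element and attribute names are assumptions.
species_par_values <- list(
  barley = "\t\t\t\t<par name=\"SLAMAX\" value=\"32\" />\n"
)
speciesparfile <- "<speciesparameters>\n@Info@\n</speciesparameters>"
block <- paste0("\t\t\t<species mnemonic=\"barley\">\n",
                species_par_values[["barley"]],
                "\t\t\t</species>\n")
speciesparfile <- gsub("@Info@", block, speciesparfile)
cat(speciesparfile)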
soil_layer_values <- paste(soil_layer, collapse = "\n \t") + } + + + ## A soil initial-conditions file is given + else if(!is.null(settings$run$inputs$poolinitcond$path)){ + # Set empty + soil_all_block <- NULL + + # Reading soil file + soil_IC_list <- PEcAn.data.land::pool_ic_netcdf2list(settings$run$inputs$poolinitcond$path) + + + + ## --- Initial conditions for the site --- ## + + # Before writing the site file, check the siteparameter and site initial conditions + + ## Siteparameter file + #300 RCNM - + # C:N ratio of humus + if ("c2n_humus" %in% names(soil_IC_list$vals)) { + h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + } + + ## Event file + # Populate the events file if there are placeholders for initial biomasses or fractional cover + # Initial biomass in the field + if(any(grepl("@InitialBiomass@", eventsfile))){ + if ("AGB" %in% names(soil_IC_list$vals)) { + initialbiomass <- round( PEcAn.utils::ud_convert(unlist(soil_IC_list$vals["AGB"])[[1]], "kg m-2", "kg ha-1"), 1 ) + } + else{ + initialbiomass <- 100 + } + # Fill in the value + eventsfile <- gsub("@InitialBiomass@", paste0("'", initialbiomass, "'"), eventsfile) + } + # Fractional cover of the plants + if(any(grepl("@FractionalCover@", eventsfile))){ + if ("fractional_cover" %in% names(soil_IC_list$vals)) { + fractionalcover <- unlist(soil_IC_list$vals["fractional_cover"])[[1]] + } + else{ + fractionalcover <- 0.5 + } + # Fill in the value + eventsfile <- gsub("@FractionalCover@", paste0("'", fractionalcover, "'"), eventsfile) + } + + + ## Site file (general) + # Soil use history + if(any(grepl("@Info_Use_History@", sitefile))){ + if ("history" %in% names(soil_IC_list$vals)){ + soil_use_history <- unlist(soil_IC_list$vals["history"])[[1]] + } + else{ + soil_use_history <- "arable" + } + sitefile <- gsub("@Info_Use_History@", paste0("'", soil_use_history, "'"), sitefile) + } + + # Soil type + if(any(grepl("@Soil_Type@", sitefile))){ + if ("soil_type" %in% names(soil_IC_list$vals)){ + soil_type <- unlist(soil_IC_list$vals["soil_type"])[[1]] + } + else{ + soil_type <- "SALO" + } + sitefile <- gsub("@Soil_Type@", paste0("'", soil_type, "'"), sitefile) + } + + # Litter height + if(any(grepl("@Litter_Height@", sitefile))){ + if ("litter_height" %in% names(soil_IC_list$vals)){ + litter_height <- unlist(soil_IC_list$vals["litter_height"])[[1]] + } + else{ + litter_height <- "0.0" + } + sitefile <- gsub("@Litter_Height@", paste0("'", litter_height, "'"), sitefile) + } + + ## -- Layers -- ## + + # Check how many depth layers are given and the depth of each + depth <- soil_IC_list$dims$depth + layer_count <- length(depth) + # Check which strata are given for the layers + layer_div <- soil_IC_list$vals$stratum + + + for(depth_level in 1:layer_count){ + + soil_one_block <- NULL + # Discretization -- every soil layer is further divided into sublayers; the layer that contains these + # sublayers is here called a block. LDNDC discourages overly thick layers, so each block is + # split into several thinner sublayers.
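A compact numeric check of the thickness calculation that follows; the depth and stratum values below are assumed examples, not values from the PR:

# Cumulative lower boundaries (m) from the netCDF dims, and sublayers per block (assumed)
depth     <- c(0.1, 0.3, 0.6)
layer_div <- c(2, 4, 3)
# Per-block thickness in mm, then per-sublayer thickness; this reproduces both the
# depth_level == 1 branch and the cumulative-difference branch below
thickness_mm <- diff(c(0, depth)) * 1000   # 100 200 300
disk <- thickness_mm / layer_div           # 50 50 100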
+ + + + # For 1st level + if(depth_level == 1){ + disk <- depth[depth_level] * 1000 / layer_div[depth_level] + } + + # For rest of layers, depth is informed as cumulative, but LDNDC uses thickness + else{ + disk <- (depth[depth_level] - depth[depth_level-1]) * 1000 / layer_div[depth_level] + } + + for(disk_level in 1:layer_div[depth_level]){ + + + # Start creating a soil layer + soil_layer_values <- paste0(" \n") + + # Add one individual layer to the block + soil_one_block <- paste(soil_one_block, soil_layer_values) + } + + # Combine the previous block of layers this and inform that "layer" changes which indicates that new + # parameter values has been used + if(depth_level != layer_count){ + soil_all_block <- paste(soil_all_block, soil_one_block, "\n \n") + } else { + soil_all_block <- paste(soil_all_block, soil_one_block, "\n") + } + } + + # If there is less than seven layer blocks initialised, use the default ones for bottom + # if(depth_level < 6){ + # soil_combine <- paste(soil_all_block, "\t\t", paste(soil_layer[-c(1:depth_level)], collapse = "\n \t\t")) + # } + # else{ + soil_combine <- soil_all_block + #} + + } + + else{ + PEcAn.logger::logger.severe("More than one soil path given: only one soil path is supported") + } + + + ## Writing and saving species- and siteparameters + initial soil conditions + speciesparfile_pfts <- NULL + + # Handle the populating of speciesparameters after we have read the info from priors + for(pftn in pfts_run){ + ## Crops ## + # Barley + if(pftn == "barley"){ + speciesparfile_pfts <- paste0(speciesparfile_pfts, + "\t\t\t \n", + species_par_values["barley"][[1]], + "\t\t\t \n\n") + } + # Oat + if(pftn == "oat"){ + speciesparfile_pfts <- paste0(speciesparfile_pfts, + "\t\t\t \n", + species_par_values["oat"][[1]], + "\t\t\t \n\n") + } + # Triticale + if(pftn == "triticale"){ + speciesparfile_pfts <- paste0(speciesparfile_pfts, + "\t\t\t \n", + species_par_values["triticale"][[1]], + "\t\t\t \n\n") + } + ## Grass + # Timothy + if(pftn == "timothy"){ + speciesparfile_pfts <- paste0(speciesparfile_pfts, + "\t\t\t \n", + species_par_values["timothy"][[1]], + "\t\t\t \n\n") + } + + # Meadow + if(pftn == "meadow"){ + speciesparfile_pfts <- paste0(speciesparfile_pfts, + "\t\t\t \n", + species_par_values["meadow"][[1]], + "\t\t\t \n\n") + } + + ## Forest + # Pipy, need to check a correct name for this wood species + if(pftn == "pipy"){ + speciesparfile_pfts <- paste0(speciesparfile_pfts, + "\t\t\t \n", + species_par_values["pipy"][[1]], + "\t\t\t \n\n") + } + } + + + # Combine the speciesparameter info + speciesparfile <- gsub("@Info@", speciesparfile_pfts, speciesparfile) + + + # Write to a new xml-file, which will be used on a run. 
Every simulation run will have + # their own set of speciesparameters values + writeLines(speciesparfile, con = file.path(settings$rundir, run.id, "speciesparameters.xml")) + + ## Write events to a new xml file + writeLines(eventsfile, con = file.path(settings$rundir, run.id, "events.xml")) + + + # Handle the populating of siteparameters + siteparfile <- gsub("@Info@", h.2, siteparfile) + + # Write siteparameters + writeLines(siteparfile, con = file.path(settings$rundir, run.id, "siteparameters.xml")) + + + + # Populate sitefile layer info with given parameter + sitefile <- gsub("@Info_Surface_Layer@", soil_combine, sitefile) + + # Write soil conditions + writeLines(sitefile, con = file.path(settings$rundir, run.id, "site.xml")) + # Write airchemistry file (not modified anywhere) + writeLines(airchemistryfile, con = file.path(settings$rundir, run.id, "airchemistry.txt")) - # Use ready airchemistry file for now - airchemistry <- readLines(con = system.file("airchemistry.txt", package = "PEcAn.LDNDC"), n = -1) - writeLines(airchemistry, con = file.path(settings$rundir, run.id, "airchemistry.txt")) #------------------------ diff --git a/models/ldndc/inst/airchemistry.txt b/models/ldndc/inst/airchemistry.txt deleted file mode 100755 index df78eca9318..00000000000 --- a/models/ldndc/inst/airchemistry.txt +++ /dev/null @@ -1,12 +0,0 @@ -%global - time = "2019-01-01/1" - -%airchemistry - id = "0" - -%attributes - co2 = "380.0" -#provide data -%data -nh4 no3 -0.6 0.3 diff --git a/models/ldndc/inst/events_template.xml b/models/ldndc/inst/events_template.xml index 3c69b98161b..dcac4512073 100644 --- a/models/ldndc/inst/events_template.xml +++ b/models/ldndc/inst/events_template.xml @@ -1,29 +1,8 @@ - - - + - - - - - - - - - - - - - - - - - - - + '@Info@' - diff --git a/models/ldndc/inst/project.ldndc b/models/ldndc/inst/project.ldndc index 8a41176b836..b53abac8cfa 100644 --- a/models/ldndc/inst/project.ldndc +++ b/models/ldndc/inst/project.ldndc @@ -13,7 +13,8 @@ - + + '@Groundwater@' diff --git a/models/ldndc/inst/setup.xml b/models/ldndc/inst/setup.xml deleted file mode 100755 index 011408330df..00000000000 --- a/models/ldndc/inst/setup.xml +++ /dev/null @@ -1,32 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/models/ldndc/inst/setup_template.xml b/models/ldndc/inst/setup_template.xml new file mode 100644 index 00000000000..d393674164c --- /dev/null +++ b/models/ldndc/inst/setup_template.xml @@ -0,0 +1,33 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + '@reportarable@' + + + + diff --git a/models/ldndc/inst/site.xml b/models/ldndc/inst/site.xml deleted file mode 100755 index fe1985ccfcf..00000000000 --- a/models/ldndc/inst/site.xml +++ /dev/null @@ -1,12 +0,0 @@ - - - - - - - - - - - - diff --git a/models/ldndc/inst/site_template.xml b/models/ldndc/inst/site_template.xml new file mode 100644 index 00000000000..b5f76665710 --- /dev/null +++ b/models/ldndc/inst/site_template.xml @@ -0,0 +1,9 @@ + + + + + + @Info_Surface_Layer@ + + + diff --git a/models/ldndc/inst/siteparameters.xml b/models/ldndc/inst/siteparameters_template.xml old mode 100755 new mode 100644 similarity index 73% rename from models/ldndc/inst/siteparameters.xml rename to models/ldndc/inst/siteparameters_template.xml index 18731a0b9b4..dc60a0e236a --- a/models/ldndc/inst/siteparameters.xml +++ b/models/ldndc/inst/siteparameters_template.xml @@ -1,6 +1,6 @@ - + @Info@ diff --git a/models/ldndc/tests/testthat.R b/models/ldndc/tests/testthat.R index d93798b4ffe..f44dabc6ffb 100644 --- 
a/models/ldndc/tests/testthat.R +++ b/models/ldndc/tests/testthat.R @@ -1,11 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- library(testthat) library(PEcAn.utils) diff --git a/models/linkages/DESCRIPTION b/models/linkages/DESCRIPTION index 065c53893b1..79e0b26d0fb 100644 --- a/models/linkages/DESCRIPTION +++ b/models/linkages/DESCRIPTION @@ -1,24 +1,22 @@ Package: PEcAn.LINKAGES Type: Package Title: PEcAn Package for Integration of the LINKAGES Model -Version: 1.7.2 -Date: 2021-10-04 +Version: 1.7.3.9000 Authors@R: c(person("Mike", "Dietze", role = c("aut"), email = "dietze@bu.edu"), person("Ann", "Raiho", role = c("aut", "cre"), email = "araiho@nd.edu"), person("University of Illinois, NCSA", role = c("cph"))) -Author: Ann Raiho, Mike Dietze -Maintainer: Ann Raiho Description: This module provides functions to link the (LINKAGES) to PEcAn. -Depends: - PEcAn.utils Imports: - PEcAn.data.atmosphere, + PEcAn.data.land, + PEcAn.DB, PEcAn.logger, PEcAn.remote, + PEcAn.utils, lubridate (>= 1.6.0), ncdf4 (>= 1.15), + utils Suggests: testthat (>= 1.0.2), linkages @@ -31,4 +29,4 @@ Copyright: Authors LazyLoad: yes LazyData: FALSE Encoding: UTF-8 -RoxygenNote: 7.2.3 +RoxygenNote: 7.3.2 diff --git a/models/linkages/LICENSE b/models/linkages/LICENSE index 5a9e44128f1..09ef35a60b4 100644 --- a/models/linkages/LICENSE +++ b/models/linkages/LICENSE @@ -1,34 +1,3 @@ -## This is the master copy of the PEcAn License - -University of Illinois/NCSA Open Source License - -Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - -PEcAn project -www.pecanproject.org - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal with the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -- Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. -- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. -- Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. 
- +YEAR: 2024 +COPYRIGHT HOLDER: PEcAn Project +ORGANIZATION: PEcAn Project, authors affiliations diff --git a/models/linkages/NAMESPACE b/models/linkages/NAMESPACE index cfd232ec26b..2897d5a0467 100644 --- a/models/linkages/NAMESPACE +++ b/models/linkages/NAMESPACE @@ -6,5 +6,3 @@ export(read_restart.LINKAGES) export(split_inputs.LINKAGES) export(write.config.LINKAGES) export(write_restart.LINKAGES) -importFrom(ncdf4,ncdim_def) -importFrom(ncdf4,ncvar_def) diff --git a/models/linkages/NEWS.md b/models/linkages/NEWS.md new file mode 100644 index 00000000000..0899764fc20 --- /dev/null +++ b/models/linkages/NEWS.md @@ -0,0 +1,10 @@ +# PEcAn.LINKAGES 1.7.3.9000 + +## License change +* PEcAn.LINKAGES is now distributed under the BSD three-clause license instead of the NCSA Open Source license. + + +# PEcAn.LINKAGES 1.7.1 + +* All changes in 1.7.1 and earlier were recorded in a single file for all of the PEcAn packages; please see +https://github.com/PecanProject/pecan/blob/v1.7.1/CHANGELOG.md for details. diff --git a/models/linkages/R/met2model.LINKAGES.R b/models/linkages/R/met2model.LINKAGES.R index 9d7c28136b7..f73a2008fd6 100644 --- a/models/linkages/R/met2model.LINKAGES.R +++ b/models/linkages/R/met2model.LINKAGES.R @@ -1,28 +1,22 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -##-------------------------------------------------------------------------------------------------# -##' Converts a met CF file to a model specific met file. The input -##' files are calld /.YYYY.cf -##' -##' @name met2model.LINKAGES -##' @title Write LINKAGES met files -##' @param in.path path on disk where CF file lives -##' @param in.prefix prefix for each file -##' @param outfolder location where model specific output is written. -##' @return OK if everything was succesful. -##' @export -##' @author Ann Raiho, Betsy Cowdery -##-------------------------------------------------------------------------------------------------# +#' Write LINKAGES met files +#' +#' Converts a met CF file to a model-specific met file. The input +#' files are called /.YYYY.cf +#' +#' @param in.path path on disk where CF file lives +#' @param in.prefix prefix for each file +#' @param outfolder location where model-specific output is written +#' @param start_date,end_date when to start and end conversion. +#' Only the year portion is used +#' @param overwrite Force replacement of an existing output file? +#' @param verbose ignored +#' @param ... Additional arguments, currently ignored +#' @return OK if everything was successful. +#' @export +#' @author Ann Raiho, Betsy Cowdery +#' met2model.LINKAGES <- function(in.path, in.prefix, outfolder, start_date, end_date, overwrite = FALSE, verbose = FALSE, ...)
{ - library(PEcAn.utils) start_date <- as.POSIXlt(start_date, tz = "GMT") end_date <- as.POSIXlt(end_date, tz = "GMT") @@ -60,8 +54,6 @@ met2model.LINKAGES <- function(in.path, in.prefix, outfolder, start_date, end_da } } - library(PEcAn.data.atmosphere) - ## check to see if the outfolder is defined, if not create directory for output if (!file.exists(outfolder)) { dir.create(outfolder) @@ -98,7 +90,7 @@ met2model.LINKAGES <- function(in.path, in.prefix, outfolder, start_date, end_da month_matrix_precip[i, m] <- (sum(ncprecipf[DOY_vec_hr[m]:(DOY_vec_hr[m + 1] - 1)]) * dt / 10) } ncdf4::nc_close(ncin) - # if(i%%100==0) cat(i,' '); flush.console() + # if(i%%100==0) cat(i,' '); utils::flush.console() } month_matrix_temp_mean <- matrix(NA, nyear, 12) @@ -120,7 +112,7 @@ met2model.LINKAGES <- function(in.path, in.prefix, outfolder, start_date, end_da if (i %% 100 == 0) { cat(i, " ") } - flush.console() + utils::flush.console() } precip.mat <- month_matrix_precip diff --git a/models/linkages/R/model2netcdf.LINKAGES.R b/models/linkages/R/model2netcdf.LINKAGES.R index e2a059c3fec..68afa4ecd6f 100644 --- a/models/linkages/R/model2netcdf.LINKAGES.R +++ b/models/linkages/R/model2netcdf.LINKAGES.R @@ -1,33 +1,21 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -##-------------------------------------------------------------------------------------------------# -##' Convert MODEL output into the NACP Intercomparison format (ALMA using netCDF) -##' -##' @name model2netcdf.LINKAGES -##' @title Code to convert LINKAGES's output into netCDF format -##' -##' @param outdir Location of model output -##' @param sitelat Latitude of the site -##' @param sitelon Longitude of the site -##' @param start_date Start time of the simulation -##' @param end_date End time of the simulation -##' @export -##' -##' @author Ann Raiho, Betsy Cowdery -##' @importFrom ncdf4 ncdim_def ncvar_def +#' Convert LINKAGES's output into netCDF format +#' +#' Convert MODEL output into the NACP Intercomparison format (ALMA using netCDF) +#' +#' @param outdir Location of model output +#' @param sitelat Latitude of the site +#' @param sitelon Longitude of the site +#' @param start_date Start time of the simulation +#' @param end_date End time of the simulation +#' @param pft_names names of PFTs to use in output labels +#' @export +#' +#' @author Ann Raiho, Betsy Cowdery +#' model2netcdf.LINKAGES <- function(outdir, sitelat, sitelon, start_date = NULL, end_date = NULL, pft_names = NULL) { # , PFTs) { logger.severe('NOT IMPLEMENTED') - library(PEcAn.utils) - ### Read in model output in linkages format load(file.path(outdir, "linkages.out.Rdata")) # linkages.output.dims <- dim(linkages.output) diff --git a/models/linkages/R/read_restart.LINKAGES.R b/models/linkages/R/read_restart.LINKAGES.R index c4173f888f1..2fa8d820ba4 100644 --- a/models/linkages/R/read_restart.LINKAGES.R +++ b/models/linkages/R/read_restart.LINKAGES.R @@ -1,33 +1,23 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -##' @title read_restart.LINKAGES -##' @name read_restart.LINKAGES -##' @author Ann Raiho \email{araiho@@nd.edu} -##' -##' @param outdir output directory -##' @param runid run ID -##' @param stop.time year that is being read -##' @param multi.settings PEcAn settings object -##' @param var.names var.names to be extracted -##' -##' @description Read Restart for LINKAGES -##' -##' @return X.vec vector of forecasts -##' @export -##' +#' Read Restart for LINKAGES +#' +#' @author Ann Raiho \email{araiho@@nd.edu} +#' +#' @param outdir output directory +#' @param runid run ID +#' @param stop.time year that is being read +#' @param settings PEcAn settings object +#' @param var.names var.names to be extracted +#' @param params passed on to return value +#' +#' @return X.vec vector of forecasts +#' @export +#' read_restart.LINKAGES <- function(outdir, runid, stop.time, settings, var.names = NULL, params = NULL) { - + # Read ensemble output - ens <- read.output(runid = runid, - outdir = file.path(outdir, runid), - start.year = lubridate::year(stop.time), + ens <- PEcAn.utils::read.output(runid = runid, + outdir = file.path(outdir, runid), + start.year = lubridate::year(stop.time), end.year = lubridate::year(stop.time), variables = var.names, pft.name = unlist(sapply(settings$pfts,'[[', "name"))) # change to just 'AGB' for plot level biomass if(!is.na(ens)){ @@ -38,21 +28,21 @@ read_restart.LINKAGES <- function(outdir, runid, stop.time, settings, var.names } #ens.pft.names <- grep("pft", names(ens)) #names(ens[[grep("pft", names(ens))]]) <- pft.names - + forecast <- list() if ("Fcomp" %in% var.names) { - forecast[[length(forecast)+1]] <- ens$AGB.pft #already has C #* unit.conv + forecast[[length(forecast)+1]] <- ens$AGB.pft #already has C #* unit.conv names(forecast[[length(forecast)]]) <- paste0('Fcomp.',pft.names) } - + if ("AGB.pft" %in% var.names) { - forecast[[length(forecast)+1]] <- ens$AGB.pft #already has C #* unit.conv + forecast[[length(forecast)+1]] <- ens$AGB.pft #already has C #* unit.conv names(forecast[[length(forecast)]]) <- paste0('AGB.pft.',pft.names) } - + if ("TotSoilCarb" %in% var.names) { - forecast[[length(forecast)+1]] <- ens$TotSoilCarb #PEcAn.utils::ud_convert(ens$TotSoilCarb, "kg/m^2", "Mg/ha") #* unit.conv + forecast[[length(forecast)+1]] <- ens$TotSoilCarb #PEcAn.utils::ud_convert(ens$TotSoilCarb, "kg/m^2", "Mg/ha") #* unit.conv names(forecast[[length(forecast)]]) <- c("TotSoilCarb") } @@ -72,7 +62,6 @@ read_restart.LINKAGES <- function(outdir, runid, stop.time, settings, var.names # Put forecast into vector print(runid) X_tmp <- list(X = unlist(forecast), params = params) - + return(X_tmp) } - diff --git a/models/linkages/R/sample.IC.LINKAGES.R b/models/linkages/R/sample.IC.LINKAGES.R index 3c77427a5f2..a1d5b18e1fd 100644 --- a/models/linkages/R/sample.IC.LINKAGES.R +++ b/models/linkages/R/sample.IC.LINKAGES.R @@ -1,25 +1,16 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - sample.IC.LINKAGES <- function(ne, state, year = NULL) { ## g C * m-2 ground area in wood (above-ground + roots) biomass_tsca = ifelse(rep("biomass_tsca" %in% names(state), ne), state$biomass_tsca[1, sample.int(ncol(state$biomass_tsca),ne), 1] * 0.1, ## unit Mg/ha ->kg/m2 - runif(ne, 0, 14000)) ## prior + stats::runif(ne, 0, 14000)) ## prior biomass_acsa3 = ifelse(rep("biomass_acsa3" %in% names(state), ne), state$biomass_acsa3[1, sample.int(ncol(state$biomass_acsa3), ne), 1] * 0.1, ## unit Mg/ha ->kg/m2 - runif(ne, 0, 14000)) ## prior + stats::runif(ne, 0, 14000)) ## prior biomass_beal2 = ifelse(rep("biomass_beal2" %in% names(state),ne), state$biomass_beal2[1, sample.int(ncol(state$biomass_beal2),ne), 1] * 0.1, ## unit Mg/ha ->kg/m2 - runif(ne, 0, 14000)) ## prior + stats::runif(ne, 0, 14000)) ## prior biomass_thoc2 = ifelse(rep("biomass_thoc2" %in% names(state),ne), state$biomass_thoc2[1, sample.int(ncol(state$biomass_thoc2), ne), 1] * 0.1, ## unit Mg/ha ->kg/m2 - runif(ne, 0, 14000)) ## prior + stats::runif(ne, 0, 14000)) ## prior return(data.frame(biomass_tsca, biomass_acsa3, biomass_beal2, biomass_thoc2)) } # sample.IC.LINKAGES diff --git a/models/linkages/R/spinup.LINKAGES.R b/models/linkages/R/spinup.LINKAGES.R index 1b9df3592e3..cff880fcf5a 100644 --- a/models/linkages/R/spinup.LINKAGES.R +++ b/models/linkages/R/spinup.LINKAGES.R @@ -1,12 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - spinup.LINKAGES <- function(start.year, end.year, temp.mat, precip.mat, paleon = NULL) { if (is.null(paleon)) { paleon <- TRUE # Why not just have `paleon = TRUE` above? diff --git a/models/linkages/R/split_inputs.LINKAGES.R b/models/linkages/R/split_inputs.LINKAGES.R index 142b9d64423..24e7d68051e 100644 --- a/models/linkages/R/split_inputs.LINKAGES.R +++ b/models/linkages/R/split_inputs.LINKAGES.R @@ -1,24 +1,20 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. All rights reserved. 
This program and the -# accompanying materials are made available under the terms of the University of Illinois/NCSA -# Open Source License which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -##' @title split_inputs.LINKAGES -##' @name split_inputs.LINKAGES -##' @author Ann Raiho -##' -##' @param settings -##' @param start.time -##' @param stop.time -##' @description Splits climate met for LINKAGES -##' -##' @return files split up climate files -##' @export -##' +#' split_inputs.LINKAGES +#' +#' Splits climate met for LINKAGES +#' +#' Stub implementation -- currently returns `inputs` and ignores all other arguments +#' +#' @author Ann Raiho +#' +#' @param settings ignored +#' @param start.time,stop.time ignored +#' @param inputs returned +#' +#' @return files split up climate files +#' @export +#' split_inputs.LINKAGES <- function(settings, start.time, stop.time, inputs) { - + return(inputs) - + } # split_inputs.LINKAGES diff --git a/models/linkages/R/version.R b/models/linkages/R/version.R new file mode 100644 index 00000000000..0e58d885272 --- /dev/null +++ b/models/linkages/R/version.R @@ -0,0 +1,3 @@ +# Set at package install time, used by pecan.all::pecan_version() +# to identify development versions of packages +.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown") diff --git a/models/linkages/R/write.config.LINKAGES.R b/models/linkages/R/write.config.LINKAGES.R index b656d14f294..43c300a33ca 100644 --- a/models/linkages/R/write.config.LINKAGES.R +++ b/models/linkages/R/write.config.LINKAGES.R @@ -1,28 +1,25 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -##-------------------------------------------------------------------------------------------------# -##' Writes a LINKAGES config file. -##' -##' Requires a pft xml object, a list of trait values for a single model run, -##' and the name of the file to create -##' -##' @name write.config.LINKAGES -##' @title Write LINKAGES configuration files -##' @param defaults list of defaults to process -##' @param trait.samples vector of samples for a given trait -##' @param settings list of settings from pecan settings file -##' @param run.id id of run -##' @return configuration file for LINKAGES for given run -##' @export -##' @author Ann Raiho, Betsy Cowdery -##-------------------------------------------------------------------------------------------------# +#' Writes a LINKAGES config file. +#' +#' Requires a pft xml object, a list of trait values for a single model run, +#' and the name of the file to create +#' +#' @param defaults list of defaults to process +#' @param trait.values vector of samples for a given trait +#' @param settings list of settings from pecan settings file +#' @param run.id id of run +#' @param restart logical: Write a restart file? +#' If NULL (default), treated as FALSE +#' @param spinup logical: perform spinup using `spinup.LINKAGES()`? 
+#' If NULL (default), treated as FALSE
+#' @param inputs inputs section of a PEcAn settings object.
+#' Currently only used for climate file (inputs$met$path),
+#' which is taken from `settings$input$met$path` if `inputs` is NULL.
+#' @param IC TODO currently ignored
+#'
+#' @return configuration file for LINKAGES for given run
+#' @export
+#' @author Ann Raiho, Betsy Cowdery
+#'
 write.config.LINKAGES <- function(defaults = NULL, trait.values, settings, run.id,
                                   restart = NULL, spinup = FALSE, inputs = NULL, IC = NULL) {
@@ -35,9 +32,7 @@ write.config.LINKAGES <- function(defaults = NULL, trait.values, settings, run.i
   }
 
   ##TO DO add restart file as IC for HF
-
-  library(linkages)
-
+
   # find out where to write run/output
   rundir <- file.path(settings$host$rundir, run.id)
   if (!file.exists(rundir)) { # why not use `dir.exists`?
@@ -62,10 +57,10 @@ write.config.LINKAGES <- function(defaults = NULL, trait.values, settings, run.i
   bgs <- 120
   egs <- 273
 
-  texture <- read.csv(system.file("texture.csv", package = "PEcAn.LINKAGES"))
+  texture <- utils::read.csv(system.file("texture.csv", package = "PEcAn.LINKAGES"))
 
-  dbcon <- db.open(settings$database$bety)
-  on.exit(db.close(dbcon), add = TRUE)
+  dbcon <- PEcAn.DB::db.open(settings$database$bety)
+  on.exit(PEcAn.DB::db.close(dbcon), add = TRUE)
 
   if("soil" %in% names(settings$run$inputs)){
     ## open soil file
@@ -78,11 +73,11 @@ write.config.LINKAGES <- function(defaults = NULL, trait.values, settings, run.i
     if(length(fc) > 1) fc <- mean(fc)
     if(length(dry) > 1) dry <- mean(dry)
     ncdf4::nc_close(nc.soil)
-    
+
   }else{
-    soils <- db.query(paste("SELECT soil,som,sand_pct,clay_pct,soilnotes FROM sites WHERE id =", settings$run$site$id),
+    soils <- PEcAn.DB::db.query(paste("SELECT soil,som,sand_pct,clay_pct,soilnotes FROM sites WHERE id =", settings$run$site$id),
                     con = dbcon)
-    
+
     soil.dat <- PEcAn.data.land::soil_params(sand = soils$sand_pct/100, clay = soils$clay_pct/100,
                                              silt = 100 - soils$sand_pct - soils$clay_pct)
     fc <- soil.dat$volume_fraction_of_water_in_soil_at_field_capacity * 100
@@ -91,11 +86,11 @@ write.config.LINKAGES <- function(defaults = NULL, trait.values, settings, run.i
     if(is.na(fc)) fc = 5
     if(is.na(dry)) dry = 5
   }
-  
-  fdat <- read.csv(system.file("fdat.csv", package = "linkages"), header = FALSE) #litter quality parameters
-  clat <- read.csv(system.file("clat.csv", package = "linkages"), header = FALSE)
+
+  fdat <- utils::read.csv(system.file("fdat.csv", package = "linkages"), header = FALSE) #litter quality parameters
+  clat <- utils::read.csv(system.file("clat.csv", package = "linkages"), header = FALSE)
   load(system.file("switch.mat.Rdata", package = "linkages"))
-  
+
   if(!is.null(inputs)){
     climate_file <- inputs$met$path
     load(climate_file)
@@ -109,8 +104,8 @@ write.config.LINKAGES <- function(defaults = NULL, trait.values, settings, run.i
   basesc <- 74
   basesn <- 1.64
-  
-  spp.params.default <- read.csv(system.file("spp_matrix.csv", package = "linkages")) # default spp.params
+
+  spp.params.default <- utils::read.csv(system.file("spp_matrix.csv", package = "linkages")) # default spp.params
   nspec <- length(settings$pfts)
   spp.params.save <- numeric(nspec)
   for (i in seq_len(nspec)) {
diff --git a/models/linkages/R/write_restart.LINKAGES.R b/models/linkages/R/write_restart.LINKAGES.R
index 77b073590b5..8e0b442048a 100644
--- a/models/linkages/R/write_restart.LINKAGES.R
+++ b/models/linkages/R/write_restart.LINKAGES.R
@@ -1,35 +1,22 @@
-#-------------------------------------------------------------------------------
-# Copyright (c) 2012 University of Illinois, NCSA.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the
-# University of Illinois/NCSA Open Source License
-# which accompanies this distribution, and is available at
-# http://opensource.ncsa.illinois.edu/license.html
-#-------------------------------------------------------------------------------
-
 ##' @title write_restart.LINKAGES
 ##' @name write_restart.LINKAGES
 ##' @author Ann Raiho \email{araiho@@nd.edu}
 ##'
 ##' @param outdir output directory
 ##' @param runid run ID
-##' @param time year that is being read
+##' @param start.time,stop.time year that is being read
 ##' @param settings PEcAn settings object
 ##' @param new.state analysis vector
 ##' @param RENAME flag to either rename output file or not
-##' @param variables
-##' @param sample_parameters
-##' @param trait.values
+##' @param new.params updated parameter values to write.
+##'   Format is a named list with each entry matching a PFT
+##' @param inputs passed on to `write.config.LINKAGES()`
 ##'
 ##' @description Write restart files for LINKAGES
 ##'
 ##' @return NONE
 ##' @export
 ##'
-
-# outdir, runid, time, settings, new.state, variables, sample_parameters = FALSE, trait.values =
-# NA,met=NULL,RENAME = TRUE
-
 write_restart.LINKAGES <- function(outdir, runid, start.time, stop.time,
                                    settings, new.state, RENAME = TRUE, new.params, inputs) {
@@ -90,7 +77,7 @@ write_restart.LINKAGES <- function(outdir, runid, start.time, stop.time,
   #distance.matrix <- rbind(c(0,3,1,2), c(3,0,2,1), c(1,2,0,3), c(2,1,3,0)) ## HACK
 
-  spp.params.default <- read.csv(system.file("spp_matrix.csv", package = "linkages")) #default spp.params
+  spp.params.default <- utils::read.csv(system.file("spp_matrix.csv", package = "linkages")) #default spp.params
   nspec <- length(settings$pfts)
   spp.params.save <- numeric(nspec)
   for (i in seq_len(nspec)) {
@@ -142,7 +129,7 @@ write_restart.LINKAGES <- function(outdir, runid, start.time, stop.time,
   if (!file.exists(outfile)) {
     outfile <- file.path(outdir, runid, paste0(start.time, "linkages.out.Rdata"))
     if (!file.exists(outfile)) {
-      logger.severe(paste0("missing outfile ens #", runid))
+      PEcAn.logger::logger.severe(paste0("missing outfile ens #", runid))
     }
   }
   print(paste0("runid = ", runid))
@@ -217,7 +204,7 @@ write_restart.LINKAGES <- function(outdir, runid, start.time, stop.time,
 
   data2 <- data.frame(ind.biomass = ind.biomass, n.index = n.index)
 
-  mean.biomass.spp <- aggregate(ind.biomass ~ n.index, mean, data = data2) # calculate mean individual biomass for each species
+  mean.biomass.spp <- stats::aggregate(ind.biomass ~ n.index, mean, data = data2) # calculate mean individual biomass for each species
   #browser()
   # calculate number of individuals needed to match new.state
   for (s in seq_along(settings$pfts)) {
@@ -242,7 +229,7 @@ write_restart.LINKAGES <- function(outdir, runid, start.time, stop.time,
 
   #making sure to stick with density dependence rules in linkages (< 198 trees per 800/m^2)
   #someday we could think about estimating this parameter from data
-  if(sum(new.ntrees,na.rm = T) > 198) new.ntrees <- round((new.ntrees / sum(new.ntrees)) * runif(1,195,198))
+  if(sum(new.ntrees,na.rm = T) > 198) new.ntrees <- round((new.ntrees / sum(new.ntrees)) * stats::runif(1,195,198))
 
   print(paste0("new.ntrees =", new.ntrees))
 
@@ -319,10 +306,13 @@ write_restart.LINKAGES <- function(outdir, runid, start.time, stop.time,
                                spp.biomass.params = spp.biomass.params) * as.numeric(bcorr[s])
     bMax <- 200
     for (j in nl:nu) {
-      dbh.temp[j] <- optimize(merit, c(1, bMax), b_obs = b_obs[j],
-                              spp.biomass.params = spp.biomass.params)$minimum
+      dbh.temp[j] <- stats::optimize(
+        merit,
+        c(1, bMax),
+        b_obs = b_obs[j],
+        spp.biomass.params = spp.biomass.params)$minimum
     }
-    
+
     b_calc1[s] <- sum(biomass_function(dbh.temp[nl:nu],
                                        spp.biomass.params = spp.biomass.params)) * (1 / 833) * 0.48
     nl <- nu + 1
@@ -375,14 +365,18 @@ write_restart.LINKAGES <- function(outdir, runid, start.time, stop.time,
   save(dbh, tyl, ntrees, nogro, ksprt, iage, C.mat, ncohrt, file = restart.file)
 
   # make a new settings with the right years min start date and end date - fail in informative way
-
-  settings$run$start.date <- paste0(formatC(year(start.time + 1), width = 4, format = "d", flag = "0"), "/01/01")
-  settings$run$end.date <- paste0(formatC(year(stop.time), width = 4, format = "d", flag = "0"), "/12/31")
-
-  do.call(write.config.LINKAGES,
-          args = list(trait.values = new.params, settings = settings, run.id = runid,
+
+  settings$run$start.date <- paste0(
+    formatC(lubridate::year(start.time + 1), width = 4, format = "d", flag = "0"),
+    "/01/01")
+  settings$run$end.date <- paste0(
+    formatC(lubridate::year(stop.time), width = 4, format = "d", flag = "0"),
+    "/12/31")
+
+  do.call(write.config.LINKAGES,
+          args = list(trait.values = new.params, settings = settings, run.id = runid,
                       restart = TRUE, spinup = FALSE, inputs = inputs))
-  
+
   # save original output
   if (RENAME) {
     file.rename(file.path(outdir, runid, "linkages.out.Rdata"),
diff --git a/models/linkages/man/met2model.LINKAGES.Rd b/models/linkages/man/met2model.LINKAGES.Rd
index 35882a7da11..966f45db05b 100644
--- a/models/linkages/man/met2model.LINKAGES.Rd
+++ b/models/linkages/man/met2model.LINKAGES.Rd
@@ -20,7 +20,16 @@ met2model.LINKAGES(
 
 \item{in.prefix}{prefix for each file}
 
-\item{outfolder}{location where model specific output is written.}
+\item{outfolder}{location where model specific output is written}
+
+\item{start_date, end_date}{when to start and end conversion.
+Only year portion is used}
+
+\item{overwrite}{Force replacement of an existing output file?}
+
+\item{verbose}{ignored}
+
+\item{...}{Additional arguments, currently ignored}
 }
 \value{
 OK if everything was successful.
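The recurring change in the hunks above, and across this PR, is namespace-qualifying every external call (`runif` becomes `stats::runif`, `read.csv` becomes `utils::read.csv`, `optimize` becomes `stats::optimize`, `db.open` becomes `PEcAn.DB::db.open`, `year` becomes `lubridate::year`) so that R CMD check stops reporting "no visible global function definition". A minimal sketch of the pattern; `demo_ic_prior_before()` and `demo_ic_prior_after()` are hypothetical functions for illustration, not part of PEcAn:

```r
# Before: a bare call only resolves if stats happens to be attached, and
# R CMD check flags it ("no visible global function definition for 'runif'").
demo_ic_prior_before <- function(ne) {
  runif(ne, 0, 14000)
}

# After: the explicit stats:: qualifier is check-clean and still works when
# the calling package is loaded but not attached.
demo_ic_prior_after <- function(ne) {
  stats::runif(ne, 0, 14000)
}
```

The alternative that R CMD check suggests, declaring `importFrom("stats", "runif")` in the NAMESPACE file, would also silence the NOTE; this PR consistently opts for explicit `::` qualification instead.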
diff --git a/models/linkages/man/model2netcdf.LINKAGES.Rd b/models/linkages/man/model2netcdf.LINKAGES.Rd index e3f9704f144..5d54201fbb6 100644 --- a/models/linkages/man/model2netcdf.LINKAGES.Rd +++ b/models/linkages/man/model2netcdf.LINKAGES.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/model2netcdf.LINKAGES.R \name{model2netcdf.LINKAGES} \alias{model2netcdf.LINKAGES} -\title{Code to convert LINKAGES's output into netCDF format} +\title{Convert LINKAGES's output into netCDF format} \usage{ model2netcdf.LINKAGES( outdir, @@ -23,6 +23,8 @@ model2netcdf.LINKAGES( \item{start_date}{Start time of the simulation} \item{end_date}{End time of the simulation} + +\item{pft_names}{names of PFTs to use in output labels} } \description{ Convert MODEL output into the NACP Intercomparison format (ALMA using netCDF) diff --git a/models/linkages/man/read_restart.LINKAGES.Rd b/models/linkages/man/read_restart.LINKAGES.Rd index 5599ca9ee14..3c8875bea83 100644 --- a/models/linkages/man/read_restart.LINKAGES.Rd +++ b/models/linkages/man/read_restart.LINKAGES.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/read_restart.LINKAGES.R \name{read_restart.LINKAGES} \alias{read_restart.LINKAGES} -\title{read_restart.LINKAGES} +\title{Read Restart for LINKAGES} \usage{ read_restart.LINKAGES( outdir, @@ -20,9 +20,11 @@ read_restart.LINKAGES( \item{stop.time}{year that is being read} +\item{settings}{PEcAn settings object} + \item{var.names}{var.names to be extracted} -\item{multi.settings}{PEcAn settings object} +\item{params}{passed on to return value} } \value{ X.vec vector of forecasts diff --git a/models/linkages/man/split_inputs.LINKAGES.Rd b/models/linkages/man/split_inputs.LINKAGES.Rd index 5d9e120bc26..f740553713c 100644 --- a/models/linkages/man/split_inputs.LINKAGES.Rd +++ b/models/linkages/man/split_inputs.LINKAGES.Rd @@ -6,12 +6,22 @@ \usage{ split_inputs.LINKAGES(settings, start.time, stop.time, inputs) } +\arguments{ +\item{settings}{ignored} + +\item{start.time, stop.time}{ignored} + +\item{inputs}{returned} +} \value{ files split up climate files } \description{ Splits climate met for LINKAGES } +\details{ +Stub implementation -- currently returns `inputs` and ignores all other arguments +} \author{ Ann Raiho } diff --git a/models/linkages/man/write.config.LINKAGES.Rd b/models/linkages/man/write.config.LINKAGES.Rd index 339dfcb89db..042c524976f 100644 --- a/models/linkages/man/write.config.LINKAGES.Rd +++ b/models/linkages/man/write.config.LINKAGES.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/write.config.LINKAGES.R \name{write.config.LINKAGES} \alias{write.config.LINKAGES} -\title{Write LINKAGES configuration files} +\title{Writes a LINKAGES config file.} \usage{ write.config.LINKAGES( defaults = NULL, @@ -18,19 +18,28 @@ write.config.LINKAGES( \arguments{ \item{defaults}{list of defaults to process} +\item{trait.values}{vector of samples for a given trait} + \item{settings}{list of settings from pecan settings file} \item{run.id}{id of run} -\item{trait.samples}{vector of samples for a given trait} +\item{restart}{logical: Write a restart file? +If NULL (default), treated as FALSE} + +\item{spinup}{logical: perform spinup using `spinup.LINKAGES()`? +If NULL (default), treated as FALSE} + +\item{inputs}{inputs section of a PEcAn settings object. 
+Currently only used for climate file (inputs$met$path), +which is taken from `settings$input$met$path` if `inputs` is NULL.} + +\item{IC}{TODO currently ignored} } \value{ configuration file for LINKAGES for given run } \description{ -Writes a LINKAGES config file. -} -\details{ Requires a pft xml object, a list of trait values for a single model run, and the name of the file to create } diff --git a/models/linkages/man/write_restart.LINKAGES.Rd b/models/linkages/man/write_restart.LINKAGES.Rd index 884d7aaacc7..12491874cab 100644 --- a/models/linkages/man/write_restart.LINKAGES.Rd +++ b/models/linkages/man/write_restart.LINKAGES.Rd @@ -21,15 +21,17 @@ write_restart.LINKAGES( \item{runid}{run ID} +\item{start.time, stop.time}{year that is being read} + \item{settings}{PEcAn settings object} \item{new.state}{analysis vector} \item{RENAME}{flag to either rename output file or not} -\item{time}{year that is being read} +\item{new.params}{updated parameter values to write.} -\item{trait.values}{} +\item{inputs}{passed on to `write.config.LINKAGES()`} } \value{ NONE diff --git a/models/linkages/tests/Rcheck_reference.log b/models/linkages/tests/Rcheck_reference.log index d0faa922ec8..9f1e55a1c55 100644 --- a/models/linkages/tests/Rcheck_reference.log +++ b/models/linkages/tests/Rcheck_reference.log @@ -12,43 +12,6 @@ Maintainer: ‘Ann Raiho ’ New submission -License components with restrictions and base license permitting such: - BSD_3_clause + file LICENSE -File 'LICENSE': - ## This is the master copy of the PEcAn License - - University of Illinois/NCSA Open Source License - - Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - - PEcAn project - www.pecanproject.org - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal with the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. - - Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR - ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - Unknown, possibly misspelled, fields in DESCRIPTION: ‘Remotes’ @@ -56,8 +19,6 @@ Strong dependencies not in mainstream repositories: PEcAn.utils, PEcAn.data.atmosphere, PEcAn.logger, PEcAn.remote Suggests or Enhances not in mainstream repositories: linkages - -The Date field is over a month old. * checking package namespace information ... 
OK * checking package dependencies ... OK * checking if this is a source package ... OK @@ -71,11 +32,7 @@ The Date field is over a month old. * checking installed package size ... OK * checking package directory ... OK * checking for future file timestamps ... OK -* checking DESCRIPTION meta-information ... NOTE -Author field differs from that derived from Authors@R - Author: ‘Ann Raiho, Mike Dietze’ - Authors@R: ‘Mike Dietze [aut], Ann Raiho [aut, cre], University of Illinois, NCSA [cph]’ - +* checking DESCRIPTION meta-information ... OK * checking top-level files ... OK * checking for left-over files ... OK * checking index information ... OK @@ -89,26 +46,11 @@ Author field differs from that derived from Authors@R * checking whether the namespace can be unloaded cleanly ... OK * checking loading without being on the library search path ... OK * checking use of S3 registration ... OK -* checking dependencies in R code ... WARNING -'::' or ':::' import not declared from: ‘PEcAn.data.land’ -'library' or 'require' call not declared from: ‘PEcAn.data.atmosphere’ -'library' or 'require' call to ‘PEcAn.utils’ which was already attached by Depends. - Please remove these calls from your code. -'library' or 'require' calls in package code: - ‘PEcAn.data.atmosphere’ ‘linkages’ - Please use :: or requireNamespace() instead. - See section 'Suggested packages' in the 'Writing R Extensions' manual. -Namespace in Imports field not imported from: ‘PEcAn.data.atmosphere’ - All declared Imports should be used. -Package in Depends field not imported from: ‘PEcAn.utils’ - These packages need to be imported from (in the NAMESPACE file) - for when this namespace is loaded but not attached. +* checking dependencies in R code ... OK * checking S3 generic/method consistency ... OK * checking replacement functions ... OK * checking foreign function calls ... OK * checking R code for possible problems ... 
NOTE -met2model.LINKAGES: no visible global function definition for - ‘flush.console’ model2netcdf.LINKAGES: no visible binding for global variable ‘ag.biomass’ model2netcdf.LINKAGES: no visible binding for global variable @@ -126,21 +68,6 @@ model2netcdf.LINKAGES: no visible binding for global variable ‘f.comp’ model2netcdf.LINKAGES: no visible binding for global variable ‘water’ model2netcdf.LINKAGES: no visible binding for global variable ‘abvgroundwood.biomass’ -read_restart.LINKAGES: no visible global function definition for - ‘read.output’ -sample.IC.LINKAGES: no visible global function definition for ‘runif’ -write.config.LINKAGES: no visible global function definition for - ‘read.csv’ -write.config.LINKAGES: no visible global function definition for - ‘db.open’ -write.config.LINKAGES: no visible global function definition for - ‘db.close’ -write.config.LINKAGES: no visible global function definition for - ‘db.query’ -write_restart.LINKAGES: no visible global function definition for - ‘read.csv’ -write_restart.LINKAGES: no visible global function definition for - ‘logger.severe’ write_restart.LINKAGES: no visible binding for global variable ‘ntrees.kill’ write_restart.LINKAGES: no visible binding for global variable @@ -149,64 +76,19 @@ write_restart.LINKAGES: no visible binding for global variable ‘iage.save’ write_restart.LINKAGES: no visible binding for global variable ‘dbh.save’ -write_restart.LINKAGES: no visible global function definition for - ‘aggregate’ -write_restart.LINKAGES: no visible global function definition for - ‘runif’ -write_restart.LINKAGES: no visible global function definition for - ‘optimize’ -write_restart.LINKAGES: no visible global function definition for - ‘year’ Undefined global functions or variables: - abvgroundwood.biomass ag.biomass ag.npp agb.pft aggregate area - db.close db.open db.query dbh.save et f.comp flush.console - hetero.resp iage.save leaf.litter logger.severe nee nogro.save - ntrees.kill optimize read.csv read.output runif total.soil.carbon - water year -Consider adding - importFrom("stats", "aggregate", "optimize", "runif") - importFrom("utils", "flush.console", "read.csv") -to your NAMESPACE file. + abvgroundwood.biomass ag.biomass ag.npp agb.pft area + db.close db.open db.query dbh.save et f.comp + hetero.resp iage.save leaf.litter nee nogro.save + ntrees.kill total.soil.carbon water * checking Rd files ... OK * checking Rd metadata ... OK * checking Rd line widths ... OK * checking Rd cross-references ... OK * checking for missing documentation entries ... OK * checking for code/documentation mismatches ... OK -* checking Rd \usage sections ... 
WARNING -Undocumented arguments in documentation object 'met2model.LINKAGES' - ‘start_date’ ‘end_date’ ‘overwrite’ ‘verbose’ ‘...’ - -Undocumented arguments in documentation object 'model2netcdf.LINKAGES' - ‘pft_names’ - -Undocumented arguments in documentation object 'read_restart.LINKAGES' - ‘settings’ ‘params’ -Documented arguments not in \usage in documentation object 'read_restart.LINKAGES': - ‘multi.settings’ - -Undocumented arguments in documentation object 'split_inputs.LINKAGES' - ‘settings’ ‘start.time’ ‘stop.time’ ‘inputs’ - -Undocumented arguments in documentation object 'write.config.LINKAGES' - ‘trait.values’ ‘restart’ ‘spinup’ ‘inputs’ ‘IC’ -Documented arguments not in \usage in documentation object 'write.config.LINKAGES': - ‘trait.samples’ - -Undocumented arguments in documentation object 'write_restart.LINKAGES' - ‘start.time’ ‘stop.time’ ‘new.params’ ‘inputs’ -Documented arguments not in \usage in documentation object 'write_restart.LINKAGES': - ‘trait.values’ - -Functions with \usage entries need to have the appropriate \alias -entries, and all their arguments documented. -The \usage entries must correspond to syntactically valid R code. -See chapter ‘Writing R documentation files’ in the ‘Writing R -Extensions’ manual. -* checking Rd contents ... WARNING -Argument items with no description in Rd object 'write_restart.LINKAGES': - ‘trait.values’ - +* checking Rd \usage sections ... OK +* checking Rd contents ... OK * checking for unstated dependencies in examples ... OK * checking examples ... NONE * checking for unstated dependencies in ‘tests’ ... OK diff --git a/models/linkages/tests/testthat.R b/models/linkages/tests/testthat.R index 15a638d15e9..bf752b76016 100644 --- a/models/linkages/tests/testthat.R +++ b/models/linkages/tests/testthat.R @@ -1,11 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- library(testthat) library(PEcAn.utils) diff --git a/models/lpjguess/DESCRIPTION b/models/lpjguess/DESCRIPTION index 215b03667d7..0a476f6da2d 100644 --- a/models/lpjguess/DESCRIPTION +++ b/models/lpjguess/DESCRIPTION @@ -1,15 +1,12 @@ Package: PEcAn.LPJGUESS Type: Package Title: PEcAn Package for Integration of the LPJ-GUESS Model -Version: 1.7.2 -Date: 2021-10-04 +Version: 1.7.3.9000 Authors@R: c(person("Istem", "Fer", role = c("aut", "cre"), email = "istem.fer@fmi.fi"), person("Tony", "Gardella", role = c("aut"), email = "tonygard@bu.edu"), person("University of Illinois, NCSA", role = c("cph"))) -Author: Istem Fer, Tony Gardella -Maintainer: Istem Fer Description: This module provides functions to link LPJ-GUESS to PEcAn. Imports: PEcAn.logger, @@ -28,4 +25,4 @@ Copyright: Authors LazyLoad: yes LazyData: FALSE Encoding: UTF-8 -RoxygenNote: 7.2.3 +RoxygenNote: 7.3.2 diff --git a/models/lpjguess/LICENSE b/models/lpjguess/LICENSE index 5a9e44128f1..09ef35a60b4 100644 --- a/models/lpjguess/LICENSE +++ b/models/lpjguess/LICENSE @@ -1,34 +1,3 @@ -## This is the master copy of the PEcAn License - -University of Illinois/NCSA Open Source License - -Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. 
- -PEcAn project -www.pecanproject.org - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal with the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -- Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. -- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. -- Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - +YEAR: 2024 +COPYRIGHT HOLDER: PEcAn Project +ORGANIZATION: PEcAn Project, authors affiliations diff --git a/models/lpjguess/NEWS.md b/models/lpjguess/NEWS.md new file mode 100644 index 00000000000..63b168f3e3f --- /dev/null +++ b/models/lpjguess/NEWS.md @@ -0,0 +1,10 @@ +# PEcAn.LPJGUESS 1.7.3.9000 + +## License change +* PEcAn.LPJGUESS is now distributed under the BSD three-clause license instead of the NCSA Open Source license. + + +# PEcAn.LPJGUESS 1.7.1 + +* All changes in 1.7.1 and earlier were recorded in a single file for all of the PEcAn packages; please see +https://github.com/PecanProject/pecan/blob/v1.7.1/CHANGELOG.md for details. diff --git a/models/lpjguess/R/met2model.LPJGUESS.R b/models/lpjguess/R/met2model.LPJGUESS.R index 9b05a54db8f..ec898e203fb 100644 --- a/models/lpjguess/R/met2model.LPJGUESS.R +++ b/models/lpjguess/R/met2model.LPJGUESS.R @@ -1,12 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - # R Code to convert NetCDF CF met files into LPJ-GUESS met files ## If files already exist in 'Outfolder', the default function is NOT to overwrite them and only @@ -24,6 +15,7 @@ ##' @param end_date the end date of the data to be downloaded (will only use the year part of the date) ##' @param overwrite should existing files be overwritten ##' @param verbose should the function be very verbose +##' @param ... 
additional arguments, currently ignored ##' @author Istem Fer ##' @importFrom ncdf4 ncvar_get ncvar_def ncdim_def ncatt_get ncatt_put nc_close met2model.LPJGUESS <- function(in.path, in.prefix, outfolder, start_date, end_date, diff --git a/models/lpjguess/R/model2netcdf.LPJGUESS.R b/models/lpjguess/R/model2netcdf.LPJGUESS.R index a92c343fb77..c5202833104 100644 --- a/models/lpjguess/R/model2netcdf.LPJGUESS.R +++ b/models/lpjguess/R/model2netcdf.LPJGUESS.R @@ -1,12 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - ##' Convert LPJ-GUESS output to netCDF ##' ##' @name model2netcdf.LPJGUESS diff --git a/models/lpjguess/R/readStateBinary.LPJGUESS.R b/models/lpjguess/R/readStateBinary.LPJGUESS.R index 89ffb0f5969..ffba146ac52 100644 --- a/models/lpjguess/R/readStateBinary.LPJGUESS.R +++ b/models/lpjguess/R/readStateBinary.LPJGUESS.R @@ -1,12 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - # R Code to convert NetCDF CF met files into LPJ-GUESS met files ## If files already exist in 'Outfolder', the default function is NOT to overwrite them and only diff --git a/models/lpjguess/R/version.R b/models/lpjguess/R/version.R new file mode 100644 index 00000000000..0e58d885272 --- /dev/null +++ b/models/lpjguess/R/version.R @@ -0,0 +1,3 @@ +# Set at package install time, used by pecan.all::pecan_version() +# to identify development versions of packages +.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown") diff --git a/models/lpjguess/R/write.config.LPJGUESS.R b/models/lpjguess/R/write.config.LPJGUESS.R index 2574c44d6ca..fd1efed6b92 100644 --- a/models/lpjguess/R/write.config.LPJGUESS.R +++ b/models/lpjguess/R/write.config.LPJGUESS.R @@ -1,13 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -##-------------------------------------------------------------------------------------------------# ##' Writes a LPJ-GUESS config file. 
##' ##' Requires a pft xml object, a list of trait values for a single model run, @@ -16,7 +6,7 @@ ##' @name write.config.LPJGUESS ##' @title Write LPJ-GUESS configuration files ##' @param defaults list of defaults to process -##' @param trait.samples vector of samples for a given trait +##' @param trait.values vector of samples for a given trait ##' @param settings list of settings from pecan settings file ##' @param run.id id of run ##' @return configuration file for LPJ-GUESS for given run diff --git a/models/lpjguess/man/met2model.LPJGUESS.Rd b/models/lpjguess/man/met2model.LPJGUESS.Rd index 2f0ae4ee03d..bc3e16a7ca3 100644 --- a/models/lpjguess/man/met2model.LPJGUESS.Rd +++ b/models/lpjguess/man/met2model.LPJGUESS.Rd @@ -29,6 +29,8 @@ met2model.LPJGUESS( \item{overwrite}{should existing files be overwritten} \item{verbose}{should the function be very verbose} + +\item{...}{additional arguments, currently ignored} } \description{ met2model wrapper for LPJ-GUESS diff --git a/models/lpjguess/man/write.config.LPJGUESS.Rd b/models/lpjguess/man/write.config.LPJGUESS.Rd index 6778bceec8f..6192b4c5a32 100644 --- a/models/lpjguess/man/write.config.LPJGUESS.Rd +++ b/models/lpjguess/man/write.config.LPJGUESS.Rd @@ -10,11 +10,11 @@ write.config.LPJGUESS(defaults, trait.values, settings, run.id, \arguments{ \item{defaults}{list of defaults to process} +\item{trait.values}{vector of samples for a given trait} + \item{settings}{list of settings from pecan settings file} \item{run.id}{id of run} - -\item{trait.samples}{vector of samples for a given trait} } \value{ configuration file for LPJ-GUESS for given run diff --git a/models/lpjguess/tests/Rcheck_reference.log b/models/lpjguess/tests/Rcheck_reference.log index a96020fb789..15fb2464393 100644 --- a/models/lpjguess/tests/Rcheck_reference.log +++ b/models/lpjguess/tests/Rcheck_reference.log @@ -12,47 +12,8 @@ Maintainer: ‘Istem Fer ’ New submission -License components with restrictions and base license permitting such: - BSD_3_clause + file LICENSE -File 'LICENSE': - ## This is the master copy of the PEcAn License - - University of Illinois/NCSA Open Source License - - Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - - PEcAn project - www.pecanproject.org - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal with the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. - - Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
- IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR - ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - Strong dependencies not in mainstream repositories: PEcAn.logger, PEcAn.remote, PEcAn.utils - -The Date field is over a month old. * checking package namespace information ... OK * checking package dependencies ... OK * checking if this is a source package ... OK @@ -66,15 +27,7 @@ The Date field is over a month old. * checking installed package size ... OK * checking package directory ... OK * checking for future file timestamps ... OK -* checking DESCRIPTION meta-information ... NOTE -Author field differs from that derived from Authors@R - Author: ‘Istem Fer, Tony Gardella’ - Authors@R: ‘Istem Fer [aut, cre], Tony Gardella [aut], University of Illinois, NCSA [cph]’ - -Maintainer field differs from that derived from Authors@R - Maintainer: ‘Istem Fer ’ - Authors@R: ‘Istem Fer ’ - +* checking DESCRIPTION meta-information ... OK * checking top-level files ... OK * checking for left-over files ... OK * checking index information ... OK @@ -132,20 +85,7 @@ All user-level objects in a package should have documentation entries. See chapter ‘Writing R documentation files’ in the ‘Writing R Extensions’ manual. * checking for code/documentation mismatches ... OK -* checking Rd \usage sections ... WARNING -Undocumented arguments in documentation object 'met2model.LPJGUESS' - ‘...’ - -Undocumented arguments in documentation object 'write.config.LPJGUESS' - ‘trait.values’ -Documented arguments not in \usage in documentation object 'write.config.LPJGUESS': - ‘trait.samples’ - -Functions with \usage entries need to have the appropriate \alias -entries, and all their arguments documented. -The \usage entries must correspond to syntactically valid R code. -See chapter ‘Writing R documentation files’ in the ‘Writing R -Extensions’ manual. +* checking Rd \usage sections ... OK * checking Rd contents ... OK * checking for unstated dependencies in examples ... OK * checking contents of ‘data’ directory ... OK diff --git a/models/lpjguess/tests/testthat.R b/models/lpjguess/tests/testthat.R index 59cf12a4475..481a3f96014 100644 --- a/models/lpjguess/tests/testthat.R +++ b/models/lpjguess/tests/testthat.R @@ -1,11 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- library(testthat) library(PEcAn.utils) diff --git a/models/maat/DESCRIPTION b/models/maat/DESCRIPTION index 4919aa251d9..ce1684852b2 100644 --- a/models/maat/DESCRIPTION +++ b/models/maat/DESCRIPTION @@ -1,13 +1,10 @@ Package: PEcAn.MAAT Type: Package Title: PEcAn Package for Integration of the MAAT Model -Version: 1.7.2 -Date: 2021-10-04 -Authors@R: as.person(c( - "Shawn Serbin [aut, cre]", - "Anthony Walker [aut]" - )) -Maintainer: Shawn Serbin +Version: 1.7.3.9000 +Authors@R: c( + person("Shawn", "Serbin", role = c("aut", "cre"), email="sserbin@bnl.gov"), + person("Anthony", "Walker", role = "aut", email="walkerap@ornl.gov")) Description: This module provides functions to wrap the MAAT model into the PEcAn workflows. Imports: PEcAn.data.atmosphere, @@ -19,12 +16,15 @@ Imports: ncdf4 (>= 1.15), XML Suggests: + knitr, + rmarkdown, testthat (>= 1.0.2) SystemRequirements: MAAT OS_type: unix License: BSD_3_clause + file LICENSE Copyright: Authors +VignetteBuilder: knitr, rmarkdown LazyLoad: yes LazyData: FALSE Encoding: UTF-8 -RoxygenNote: 7.2.3 +RoxygenNote: 7.3.2 diff --git a/models/maat/LICENSE b/models/maat/LICENSE index 5a9e44128f1..09ef35a60b4 100644 --- a/models/maat/LICENSE +++ b/models/maat/LICENSE @@ -1,34 +1,3 @@ -## This is the master copy of the PEcAn License - -University of Illinois/NCSA Open Source License - -Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - -PEcAn project -www.pecanproject.org - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal with the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -- Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. -- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. -- Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. 
- +YEAR: 2024 +COPYRIGHT HOLDER: PEcAn Project +ORGANIZATION: PEcAn Project, authors affiliations diff --git a/models/maat/NEWS.md b/models/maat/NEWS.md new file mode 100644 index 00000000000..7d116480248 --- /dev/null +++ b/models/maat/NEWS.md @@ -0,0 +1,7 @@ +# PEcAn.MAAT 1.7.3.9000 + +## License change +* PEcAn.MAAT is now distributed under the BSD three-clause license instead of the NCSA Open Source license. + +## Added +* Added a `NEWS.md` file to track changes to the package. Prior to this point changes are tracked in the main CHANGELOG for the PEcAn repository. diff --git a/models/maat/R/met2model.MAAT.R b/models/maat/R/met2model.MAAT.R index 8997e890704..c7609bad305 100644 --- a/models/maat/R/met2model.MAAT.R +++ b/models/maat/R/met2model.MAAT.R @@ -1,11 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- ## R Code to convert NetCDF CF met files into MAAT model met files @@ -30,6 +22,7 @@ PREFIX_XML <- "\n" ##' @param verbose should the function be very verbose ##' @param leap_year Enforce Leap-years? If set to TRUE, will require leap years to have 366 days. ##' If set to false, will require all years to have 365 days. Default = TRUE. +##' @param ... additional arguments, currently ignored ##' @export ##' @author Shawn P. Serbin ##' diff --git a/models/maat/R/model2netcdf.MAAT.R b/models/maat/R/model2netcdf.MAAT.R index b254699e42d..dd1a21a9080 100755 --- a/models/maat/R/model2netcdf.MAAT.R +++ b/models/maat/R/model2netcdf.MAAT.R @@ -1,14 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - - -##-------------------------------------------------------------------------------------------------# ##' @title Function to convert MAAT model output to standard netCDF format ##' ##' @param rundir Location of MAAT model run (i.e. MAAT project) directory with all required model run inputs. diff --git a/models/maat/R/version.R b/models/maat/R/version.R new file mode 100644 index 00000000000..0e58d885272 --- /dev/null +++ b/models/maat/R/version.R @@ -0,0 +1,3 @@ +# Set at package install time, used by pecan.all::pecan_version() +# to identify development versions of packages +.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown") diff --git a/models/maat/R/write.config.MAAT.R b/models/maat/R/write.config.MAAT.R index 3b492ddf684..825cefe061f 100644 --- a/models/maat/R/write.config.MAAT.R +++ b/models/maat/R/write.config.MAAT.R @@ -1,24 +1,16 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. 
This program and the accompanying materials
-# are made available under the terms of the
-# University of Illinois/NCSA Open Source License
-# which accompanies this distribution, and is available at
-# http://opensource.ncsa.illinois.edu/license.html
-#-------------------------------------------------------------------------------
-
 ##-------------------------------------------------------------------------------------------------#
 ## Functions to prepare and write out MAAT model xml files for MA, SA, and Ensemble runs
 PREFIX_XML <- "<?xml version=\"1.0\"?>\n"
 ##-------------------------------------------------------------------------------------------------#
 
 ##------------------------------------------------------------------------------------------------#
+##' Convert samples for MAAT
+##'
 ##' convert parameters and parameter names from PEcAn database default units/names with MAAT
 ##'
 ##' Performs model specific unit conversions on a list of trait values,
 ##' such as those provided to write.config
-##' @name convert.samples.MAAT
-##' @title Convert samples for MAAT
+##'
 ##' @param trait.samples a matrix or dataframe of samples from the trait distribution
 ##' @param runid optional parameter for debugging
 ##' @return matrix or dataframe with values transformed
@@ -103,10 +95,8 @@ convert.samples.MAAT <- function(trait.samples, runid) {
 ##' Requires a pft xml object, a list of trait values for a single model run,
 ##' and the name of the file to create
 ##'
-##' @name write.config.MAAT
-##' @title Write MAAT model configuration files
 ##' @param defaults list of defaults to process
-##' @param trait.samples vector of samples for a given trait
+##' @param trait.values vector of samples for a given trait
 ##' @param settings list of settings from pecan settings file
 ##' @param run.id id of run
 ##' @return configuration file for MAAT for given run
diff --git a/models/maat/man/met2model.MAAT.Rd b/models/maat/man/met2model.MAAT.Rd
index 237b8b816c2..b6e4ce7f27e 100644
--- a/models/maat/man/met2model.MAAT.Rd
+++ b/models/maat/man/met2model.MAAT.Rd
@@ -33,6 +33,8 @@ met2model.MAAT(
 
 \item{leap_year}{Enforce Leap-years? If set to TRUE, will require leap years to have 366 days.
If set to false, will require all years to have 365 days. Default = TRUE.}
+
+\item{...}{additional arguments, currently ignored}
 }
 \description{
 met2model wrapper for MAAT
diff --git a/models/maat/man/write.config.MAAT.Rd b/models/maat/man/write.config.MAAT.Rd
index e1704cf03a8..1ef53872054 100644
--- a/models/maat/man/write.config.MAAT.Rd
+++ b/models/maat/man/write.config.MAAT.Rd
@@ -2,26 +2,23 @@
 % Please edit documentation in R/write.config.MAAT.R
 \name{write.config.MAAT}
 \alias{write.config.MAAT}
-\title{Write MAAT model configuration files}
+\title{Writes a MAAT config file.}
 \usage{
 write.config.MAAT(defaults = NULL, trait.values, settings, run.id)
 }
 \arguments{
 \item{defaults}{list of defaults to process}
 
+\item{trait.values}{vector of samples for a given trait}
+
 \item{settings}{list of settings from pecan settings file}
 
 \item{run.id}{id of run}
-
-\item{trait.samples}{vector of samples for a given trait}
 }
 \value{
 configuration file for MAAT for given run
 }
 \description{
-Writes a MAAT config file.
-} -\details{ Requires a pft xml object, a list of trait values for a single model run, and the name of the file to create } diff --git a/models/maat/tests/Rcheck_reference.log b/models/maat/tests/Rcheck_reference.log index 57d8d7f45c7..251779d0728 100644 --- a/models/maat/tests/Rcheck_reference.log +++ b/models/maat/tests/Rcheck_reference.log @@ -12,50 +12,9 @@ Maintainer: ‘Shawn Serbin ’ New submission -License components with restrictions and base license permitting such: - BSD_3_clause + file LICENSE -File 'LICENSE': - ## This is the master copy of the PEcAn License - - University of Illinois/NCSA Open Source License - - Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - - PEcAn project - www.pecanproject.org - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal with the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. - - Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR - ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - Strong dependencies not in mainstream repositories: PEcAn.data.atmosphere, PEcAn.logger, PEcAn.remote, PEcAn.settings, PEcAn.utils - -Authors@R field should be a call to person(), or combine such calls. - -The Date field is over a month old. * checking package namespace information ... OK * checking package dependencies ... OK * checking if this is a source package ... OK @@ -109,20 +68,7 @@ to your NAMESPACE file. * checking Rd cross-references ... OK * checking for missing documentation entries ... OK * checking for code/documentation mismatches ... OK -* checking Rd \usage sections ... WARNING -Undocumented arguments in documentation object 'met2model.MAAT' - ‘...’ - -Undocumented arguments in documentation object 'write.config.MAAT' - ‘trait.values’ -Documented arguments not in \usage in documentation object 'write.config.MAAT': - ‘trait.samples’ - -Functions with \usage entries need to have the appropriate \alias -entries, and all their arguments documented. -The \usage entries must correspond to syntactically valid R code. -See chapter ‘Writing R documentation files’ in the ‘Writing R -Extensions’ manual. +* checking Rd \usage sections ... OK * checking Rd contents ... OK * checking for unstated dependencies in examples ... OK * checking files in ‘vignettes’ ... 
WARNING
diff --git a/models/maat/vignettes/create_amerifluxLBL_drivers_for_maat.Rmd b/models/maat/vignettes/create_amerifluxLBL_drivers_for_maat.Rmd
index 8dabad23071..a5be19dd635 100644
--- a/models/maat/vignettes/create_amerifluxLBL_drivers_for_maat.Rmd
+++ b/models/maat/vignettes/create_amerifluxLBL_drivers_for_maat.Rmd
@@ -2,7 +2,10 @@
 title: "PEcAn: Generating met drivers for the MAAT model using AmerifluxLBL tower observations"
 author: "Shawn Serbin"
 date: "2018-08-28"
-output: html_document
+output: rmarkdown::html_vignette
+vignette: |
+  %\VignetteIndexEntry{PEcAn: Generating met drivers for the MAAT model using AmerifluxLBL tower observations}
+  %\VignetteEngine{knitr::rmarkdown}
 ---
 
 # Overview
@@ -13,27 +16,25 @@ In this example we will download 6 years of met data from the [Willow Creek Eddy
 The PEcAn.data.atmosphere source code is in [`modules/data.atmosphere`](https://github.com/PecanProject/pecan/tree/develop/modules/data.atmosphere) and the documentation can be found with either `package?PEcAn.data.atmosphere` or in the [data.atmosphere package documentation](https://pecanproject.github.io/pecan//modules/data.atmosphere/inst/web/index.html).
 
 First make sure you have the necessary PEcAn package for processing met data
-```{r install}
-devtools::install_github("pecanproject/pecan", ref = "develop", subdir = "modules/data.atmosphere")
+```{r install, eval=FALSE}
+options(repos = c(getOption("repos"), PEcAn = "pecanproject.r-universe.dev"))
+install.packages("PEcAn.data.atmosphere")
 ```
 
 ```{r}
-library(knitr)
-library(ggplot2)
-library(ggthemes)
 library(PEcAn.data.atmosphere)
 ```
 
 ```{r echo=FALSE}
-knitr::opts_chunk$set(message = FALSE, warnings = FALSE, cache = FALSE,
+knitr::opts_chunk$set(message = FALSE, warning = FALSE, cache = FALSE, fig.height= 3, fig.width = 8)
 ```
 
 ## Connect to BETY database and retrieve appropriate format for AmerifluxLBL datasets
 [TODO: Update to work with R-OpenSci traits BETY]
-```{r bety}
+```{r bety, eval = FALSE}
 con <- PEcAn.DB::db.open(
    list(user='bety', password='bety', host='localhost',
@@ -42,30 +43,94 @@
 format.id <- 5000000002
 format <- PEcAn.DB::query.format.vars(format.id=format.id,bety = con)
 format$time_zone <- "America/Chicago"
 ```
+(Note: To avoid needing a database connection at vignette build time, we use code that does not display in the compiled vignette to construct a static version of the `format` object. When running this document interactively, with a live database connection, you can use the result from `query.format.vars` directly.)
+
+```{r dummy-format, include = FALSE}
+# copy-pasted from `dput(format)` after evaluating the `bety` chunk above
+# using Chris Black's development version of BeTY on 2024-09-16
+format <- list(
+  file_name = "AMERIFLUX_BASE_HH",
+  mimetype = "csv",
+  vars = structure(
+    list(
+      bety_name = c("air_pressure", "airT", "co2atm", "datetime", "FC", "H",
+        "LE", "NEE", "PAR", "precipitation_flux", "relative_humidity",
+        "soilM", "soilT", "solar_radiation", "specific_humidity",
+        "surface_downwelling_longwave_flux_in_air", "TotalResp", "UST",
+        "VPD", "wind_direction", "Wspd"),
+      variable_id = c(554, 86, 135, 5000000001, 1000000042, 299, 298, 297,
+        136, 555, 556, 391, 379, 547, 557, 561, 1000000206, 1000000041, 387,
+        560, 390),
+      input_name = c("PA", "TA", "CO2_1", "TIMESTAMP_START", "FC", "H", "LE",
+        "NEE_PI", "PPFD_IN", "P", "RH", "SWC_1", "TS_1", "SW_IN", "H2O",
+        "LW_IN", "RECO_PI", "USTAR", "VPD", "WD", "WS"),
+      input_units = c("kPa", "celsius", "umol mol-1", "ymd_hms",
+        "umol C m-2 s-1", "W m-2","W m-2", "umol C m-2 s-1", "umol m-2 s-1",
+        "kg m-2 (30 minute)-1", "percent", "%", "celsius", "W m-2",
+        "g kg-1", "W m-2", "umol C m-2 s-1", "m s-1", "hPa", "degrees",
+        "m s-1"),
+      storage_type = c("", "", "", "%Y%m%d%H%M", "", "", "", "", "", "", "",
+        "", "", "", "", "", "", "", "", "", ""),
+      column_number = c(19L, 4L, 20L, 1L, 8L, 10L, 12L, 7L, 26L, 17L, 18L,
+        23L, 15L, 27L, 33L, 31L, NA, 3L, NA, 5L, 6L),
+      bety_units = c("Pa", "degrees C", "ppm (= umol mol-1)", "ymd_hms",
+        "umol C m-2 s-1", "W m-2", "W m-2", "umol C m-2 s-1",
+        "umol m-2 s-1", "Kg/m2/s", "%", "%", "degrees C", "W/m^2", "g g-1",
+        "W m-2", "kg C m-2 s-1", "m s-1", "Pa", "degrees", "m s-1"),
+      mstmip_name = c("Psurf", "Tair", "CO2air", NA, "FC", "Qh", "Qle",
+        "NEE", NA, "Rainf", NA, "SoilMoistFrac", "SoilTemp", "SWdown",
+        "Qair", "LWdown", "TotalResp", NA, NA, NA, NA),
+      mstmip_units = c("Pa", "K", "micromol mol-1", NA, "kg C m-2 s-1",
+        "W m-2", "W m-2", "kg C m-2 s-1", NA, "kg m-2 s-1", NA, "1", "K",
+        "W/m^2", "kg kg-1", "W/m2", "kg C m-2 s-1", NA, NA, NA, NA),
+      pecan_name = c("Psurf", "Tair", "CO2air", "datetime", "FC", "Qh",
+        "Qle", "NEE", "PAR", "Rainf", "relative_humidity", "SoilMoistFrac",
+        "SoilTemp", "SWdown", "Qair", "LWdown", "TotalResp", "UST", "VPD",
+        "wind_direction", "Wspd"),
+      pecan_units = c("Pa", "K", "micromol mol-1", "ymd_hms", "kg C m-2 s-1",
+        "W m-2", "W m-2", "kg C m-2 s-1", "umol m-2 s-1", "kg m-2 s-1", "%",
+        "1", "K", "W/m^2", "kg kg-1", "W/m2", "kg C m-2 s-1", "m s-1", "Pa",
+        "degrees", "m s-1")),
+    row.names = c(NA, -21L),
+    class = "data.frame"),
+  skip = 2,
+  header = 1,
+  na.strings = c("-9999", "-6999", "9999", "NA"),
+  time.row = 4L,
+  site = NULL,
+  lat = NULL,
+  lon = NULL,
+  time_zone = "America/Chicago")
+```
+
 ## Download AmerifluxLBL data for selected site and dates (US-WCr, 2000 to 2005)
 ```{r download}
-download.AmerifluxLBL(sitename = "US-WCr", outfolder = "~/scratch/met2model_testing",
+m2mdir <- tempfile("met2model_testing")
+dir.create(m2mdir)
+download.AmerifluxLBL(sitename = "US-WCr", outfolder = m2mdir,
                       start_date = "2000-01-01", end_date = "2005-12-31",
                       overwrite = TRUE, verbose = TRUE)
 ```
 
 ## Convert downloaded AmerifluxLBL data to CF format
 ```{r met2cf}
+cfdir <- file.path(m2mdir, "CF")
met2CF.AmerifluxLBL(in.path = m2mdir, in.prefix = "AMF_US-WCr", outfolder = cfdir,
                    start_date = "2000-01-01", end_date =
"2005-12-31",format=format) ``` ## Gapfill CF met drivers ```{r metgapfill} -metgapfill(in.path = "~/scratch/met2model_testing/CF/", in.prefix = "AMF_US-WCr_BASE_HH_14-5", - outfolder = "~/scratch/met2model_testing/CF/gapfill/", start_date = "2000-01-01", end_date = "2005-12-31") +gapfilldir <- file.path(cfdir, "gapfill") +metgapfill(in.path = cfdir, in.prefix = "AMF_US-WCr_BASE_HH_14-5", + outfolder = gapfilldir, start_date = "2000-01-01", end_date = "2005-12-31") ``` ## Create MAAT model-specific met drivers ```{r met2model} -in.path <- "~/scratch/met2model_testing/CF/gapfill/" +in.path <- gapfilldir in.prefix <- "AMF_US-WCr_BASE_HH_14-5" -outfolder <- "~/scratch/met2model_testing/CF/gapfill/maat_drivers/" +outfolder <- file.path(gapfilldir, "maat_drivers") start_date <- "2000-01-01" end_date <- "2005-12-31" overwrite <- TRUE diff --git a/models/maat/vignettes/running_maat_in_pecan.Rmd b/models/maat/vignettes/running_maat_in_pecan.Rmd index 19682997c80..9a0a5890ea0 100644 --- a/models/maat/vignettes/running_maat_in_pecan.Rmd +++ b/models/maat/vignettes/running_maat_in_pecan.Rmd @@ -1,8 +1,10 @@ --- title: "Running MAAT in PEcAn" author: "Shawn Serbin" -date: "2018-08-28" -output: html_document +output: rmarkdown::html_vignette +vignette: | + %\VignetteIndexEntry{Running MAAT in PEcAn} + %\VignetteEngine{knitr::rmarkdown} --- ```{r echo=FALSE} @@ -23,21 +25,19 @@ Follow the instructions found here: https://github.com/walkeranthonyp/MAAT/blob/ ## Installing the R packages -```{r install, eval = -(1:6)} -devtools::install_github("pecanproject/pecan", ref = "develop", subdir = "base/logger") -devtools::install_github("PecanProject/pecan", ref = "develop", subdir= "base/remote") -devtools::install_github("pecanproject/pecan", ref = "develop", subdir = "base/utils") -devtools::install_github("pecanproject/pecan", ref = "develop", subdir = "base/settings") -devtools::install_github("pecanproject/pecan", ref = "develop", subdir = "modules/data.atmosphere") -devtools::install_github("pecanproject/pecan", ref = "develop", subdir = "models/maat") +```{r install, eval = FALSE} +options(repos = c(getOption("repos"), PEcAn = "pecanproject.r-universe.dev")) +install.packages("PEcAn.MAAT") +``` +```{r load-pkg} library(PEcAn.MAAT) ``` ## MAAT XML configuration ### E.g. Running without met drivers and using user-specified met conditions -```{xml maat_nomet} +```{cat maat_nomet} leaf @@ -65,7 +65,7 @@ library(PEcAn.MAAT) ``` ### E.g. Running with met drivers -```{xml maat_met} +```{cat maat_met} leaf @@ -85,7 +85,7 @@ library(PEcAn.MAAT) ### E.g. pecan.xml file with MAAT configuration options. In this example the MAAT model is configured for a temperate deciduous forest based on the temperate.deciduous PFT in the BETYdb database (https://www.betydb.org/pfts/2000000044). -```{xml pecan_xml} +```{cat pecan_xml} ~/scratch/maat_pecan_test_run/ @@ -187,13 +187,14 @@ In this example the MAAT model is configured for a temperate deciduous forest ba ## Simple MAAT run in PEcAn In this example we will run ten MAAT model ensembles in serial based on parameter values derived from the temperate.deciduous PFT in the BETYdb database (https://www.betydb.org/pfts/2000000044) and the PEcAn meta analysis step -```{r run_maat} -library(PEcAn.all) -library(PEcAn.utils) +This chunk is shown but is not currently executed at vignette build time, because running it requires a connection to the PEcAn database. 
-setwd("~") -getwd() +```{r run_maat, eval = FALSE} +rundir <- tempfile("maat_pecan_test_run") +dir.create(rundir) +setwd(rundir) settings <- PEcAn.settings::read.settings(system.file("pecan.maat.xml", package="PEcAn.MAAT", mustWork = TRUE)) +settings$outdir <- rundir settings <- PEcAn.settings::prepare.settings(settings, force=FALSE) PEcAn.logger::logger.info(paste0("Main output directory: ",settings$outdir)) diff --git a/models/maespa/.Rbuildignore b/models/maespa/.Rbuildignore new file mode 100644 index 00000000000..12ebc8111ea --- /dev/null +++ b/models/maespa/.Rbuildignore @@ -0,0 +1,2 @@ +Dockerfile +model_info.json \ No newline at end of file diff --git a/models/maespa/DESCRIPTION b/models/maespa/DESCRIPTION index f4212c71d11..40d1f78eaa3 100644 --- a/models/maespa/DESCRIPTION +++ b/models/maespa/DESCRIPTION @@ -1,13 +1,10 @@ Package: PEcAn.MAESPA Type: Package Title: PEcAn Functions Used for Ecological Forecasts and Reanalysis using MAESPA -Version: 1.7.2 -Date: 2021-10-04 +Version: 1.7.3.9000 Authors@R: c(person("Tony", "Gardella", role = c("aut", "cre"), email = "tonygard@bu.edu"), person("University of Illinois, NCSA", role = c("cph"))) -Author: Tony Gardella -Maintainer: Tony Gardella Description: The Predictive Ecosystem Carbon Analyzer (PEcAn) is a scientific workflow management tool that is designed to simplify the management of model parameterization, execution, and analysis. The goal of PECAn is to @@ -25,12 +22,12 @@ Suggests: Maeswrap, coda, testthat (>= 1.0.2) +Remotes: + github::RemkoDuursma/Maeswrap SystemRequirements: MAESPA ecosystem model OS_type: unix License: BSD_3_clause + file LICENSE Copyright: Authors -LazyLoad: yes LazyData: FALSE -Require: Encoding: UTF-8 -RoxygenNote: 7.2.3 +RoxygenNote: 7.3.2 diff --git a/models/maespa/Dockerfile b/models/maespa/Dockerfile index 0b5fbd1be54..22243dc3c1b 100644 --- a/models/maespa/Dockerfile +++ b/models/maespa/Dockerfile @@ -4,7 +4,7 @@ ARG IMAGE_VERSION="latest" # ---------------------------------------------------------------------- # BUILD MODEL BINARY # ---------------------------------------------------------------------- -FROM pecan/models:${IMAGE_VERSION} as model-binary +FROM pecan/models:${IMAGE_VERSION} AS model-binary # download, unzip and build ed2 WORKDIR /src diff --git a/models/maespa/LICENSE b/models/maespa/LICENSE index 5a9e44128f1..09ef35a60b4 100644 --- a/models/maespa/LICENSE +++ b/models/maespa/LICENSE @@ -1,34 +1,3 @@ -## This is the master copy of the PEcAn License - -University of Illinois/NCSA Open Source License - -Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - -PEcAn project -www.pecanproject.org - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal with the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -- Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. -- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. 
-- Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - +YEAR: 2024 +COPYRIGHT HOLDER: PEcAn Project +ORGANIZATION: PEcAn Project, authors affiliations diff --git a/models/maespa/NEWS.md b/models/maespa/NEWS.md new file mode 100644 index 00000000000..dfe735a50f5 --- /dev/null +++ b/models/maespa/NEWS.md @@ -0,0 +1,7 @@ +# PEcAn.MAESPA 1.7.3.9000 + +## License change +* PEcAn.MAESPA is now distributed under the BSD three-clause license instead of the NCSA Open Source license. + +## Added +* Added a `NEWS.md` file to track changes to the package. Prior to this point changes are tracked in the main CHANGELOG for the PEcAn repository. diff --git a/models/maespa/R/met2model.MAESPA.R b/models/maespa/R/met2model.MAESPA.R index cec8c29147d..e24aa9d1c51 100755 --- a/models/maespa/R/met2model.MAESPA.R +++ b/models/maespa/R/met2model.MAESPA.R @@ -1,11 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- # R Code to convert NetCDF CF met files into MAESPA met files @@ -24,6 +16,7 @@ ##' @param end_date the end date of the data to be downloaded (will only use the year part of the date) ##' @param overwrite should existing files be overwritten ##' @param verbose should the function be very verbose +##' @param ... further arguments, currently ignored ##' ##' @author Tony Gardella met2model.MAESPA <- function(in.path, in.prefix, outfolder, start_date, end_date, diff --git a/models/maespa/R/model2netcdf.MAESPA.R b/models/maespa/R/model2netcdf.MAESPA.R index b47f7cd2211..65a29b71243 100755 --- a/models/maespa/R/model2netcdf.MAESPA.R +++ b/models/maespa/R/model2netcdf.MAESPA.R @@ -1,13 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. 
This program and the accompanying materials
-# are made available under the terms of the
-# University of Illinois/NCSA Open Source License
-# which accompanies this distribution, and is available at
-# http://opensource.ncsa.illinois.edu/license.html
-#-------------------------------------------------------------------------------
-
-## -------------------------------------------------------------------------------------------------#
 ##' Convert MAESPA output into the NACP Intercomparison format (ALMA using netCDF)
 ##'
 ##' @name model2netcdf.MAESPA
diff --git a/models/maespa/R/version.R b/models/maespa/R/version.R
new file mode 100644
index 00000000000..0e58d885272
--- /dev/null
+++ b/models/maespa/R/version.R
@@ -0,0 +1,3 @@
+# Set at package install time, used by pecan.all::pecan_version()
+# to identify development versions of packages
+.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown")
diff --git a/models/maespa/R/write.config.MAESPA.R b/models/maespa/R/write.config.MAESPA.R
index c0b98aa72ff..6adfd849eea 100755
--- a/models/maespa/R/write.config.MAESPA.R
+++ b/models/maespa/R/write.config.MAESPA.R
@@ -1,13 +1,3 @@
-#-------------------------------------------------------------------------------
-# Copyright (c) 2012 University of Illinois, NCSA.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the
-# University of Illinois/NCSA Open Source License
-# which accompanies this distribution, and is available at
-# http://opensource.ncsa.illinois.edu/license.html
-#-------------------------------------------------------------------------------
-
-## -------------------------------------------------------------------------------------------------#
 ##' Writes a config file for Maespa
 ##'
 ##' Requires a pft xml object, a list of trait values for a single model run,
@@ -16,7 +6,7 @@
 ##' @name write.config.MAESPA
 ##' @title Write MAESPA configuration files
 ##' @param defaults list of defaults to process
-##' @param trait.samples vector of samples for a given trait
+##' @param trait.values vector of samples for a given trait
 ##' @param settings list of settings from pecan settings file
 ##' @param run.id id of run
 ##' @return configuration file for MAESPA for given run
@@ -25,10 +15,10 @@
 ##-------------------------------------------------------------------------------------------------#
 write.config.MAESPA <- function(defaults, trait.values, settings, run.id) {
 
-  if(!require("Maeswrap")){
-    logger.severe("The Maeswrap package is not installed.
+  if(!requireNamespace("Maeswrap", quietly = TRUE)){
+    PEcAn.logger::logger.severe("The Maeswrap package is not installed.
     Please consult PEcAn documentation for install notes:
-    https://pecanproject.github.io/pecan-documentation/master/pecan-models.html#maespa")
+    https://pecanproject.github.io/pecan-documentation/latest/pecan-models.html#maespa")
   }
 
   # find out where to write run/ouput
diff --git a/models/maespa/README.md b/models/maespa/README.md
index 21fab390d62..876659bbe20 100644
--- a/models/maespa/README.md
+++ b/models/maespa/README.md
@@ -30,6 +30,11 @@ library(remotes)
 install_github('pecanproject/pecan', subdir = "models/maespa")
 ```
 
+When writing configuration files, PEcAn.MAESPA uses the `Maeswrap` package.
+We recommend Maeswrap 1.8.0 or later, which you can install from GitHub with `remotes::install_github("RemkoDuursma/Maeswrap")`.
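+As a minimal sketch of that check (assuming the `remotes` package is installed), you can install the GitHub version only when Maeswrap is missing or older than 1.8.0:
+
+```r
+# Install Maeswrap >= 1.8.0 from GitHub when it is absent or too old
+if (!requireNamespace("Maeswrap", quietly = TRUE) ||
+    utils::packageVersion("Maeswrap") < "1.8.0") {
+  remotes::install_github("RemkoDuursma/Maeswrap")
+}
+```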
+An older version of Maeswrap (1.7 at this writing in September 2024) is also available from CRAN and does provide the functionality PEcAn.MAESPA needs, but this version has a strong dependency on the `rgl` package, which can be hard to install and is not needed by PEcAn. If you have trouble installing the extra dependencies or if you are annoyed by an unwanted plot window opening every time you use PEcAn.MAESPA, try upgrading Maeswrap to 1.8 or later.
+
+
 ## Example
 
 This is a basic example which shows you how to solve a common problem:
diff --git a/models/maespa/man/met2model.MAESPA.Rd b/models/maespa/man/met2model.MAESPA.Rd
index b6809be239b..f261a78c777 100644
--- a/models/maespa/man/met2model.MAESPA.Rd
+++ b/models/maespa/man/met2model.MAESPA.Rd
@@ -29,6 +29,8 @@ met2model.MAESPA(
 \item{overwrite}{should existing files be overwritten}
 
 \item{verbose}{should the function be very verbose}
+
+\item{...}{further arguments, currently ignored}
 }
 \description{
 met2model wrapper for MAESPA
diff --git a/models/maespa/man/write.config.MAESPA.Rd b/models/maespa/man/write.config.MAESPA.Rd
index b830dba5daa..855f2aedb2f 100644
--- a/models/maespa/man/write.config.MAESPA.Rd
+++ b/models/maespa/man/write.config.MAESPA.Rd
@@ -9,11 +9,11 @@ write.config.MAESPA(defaults, trait.values, settings, run.id)
 \arguments{
 \item{defaults}{list of defaults to process}
 
+\item{trait.values}{vector of samples for a given trait}
+
 \item{settings}{list of settings from pecan settings file}
 
 \item{run.id}{id of run}
-
-\item{trait.samples}{vector of samples for a given trait}
 }
 \value{
 configuration file for MAESPA for given run
diff --git a/models/maespa/tests/Rcheck_reference.log b/models/maespa/tests/Rcheck_reference.log
index 9916a699928..afacbd204ec 100644
--- a/models/maespa/tests/Rcheck_reference.log
+++ b/models/maespa/tests/Rcheck_reference.log
@@ -12,50 +12,11 @@ Maintainer: ‘Tony Gardella ’
 
 New submission
 
-License components with restrictions and base license permitting such:
-  BSD_3_clause + file LICENSE
-File 'LICENSE':
-  ## This is the master copy of the PEcAn License
-
-  University of Illinois/NCSA Open Source License
-
-  Copyright (c) 2012, University of Illinois, NCSA. All rights reserved.
-
-  PEcAn project
-  www.pecanproject.org
-
-  Permission is hereby granted, free of charge, to any person obtaining
-  a copy of this software and associated documentation files (the
-  "Software"), to deal with the Software without restriction, including
-  without limitation the rights to use, copy, modify, merge, publish,
-  distribute, sublicense, and/or sell copies of the Software, and to
-  permit persons to whom the Software is furnished to do so, subject to
-  the following conditions:
-
-  - Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimers.
-  - Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimers in the
-    documentation and/or other materials provided with the distribution.
-  - Neither the names of University of Illinois, NCSA, nor the names
-    of its contributors may be used to endorse or promote products
-    derived from this Software without specific prior written permission.
-
-  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR - ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - Unknown, possibly misspelled, fields in DESCRIPTION: ‘Require’ Strong dependencies not in mainstream repositories: PEcAn.data.atmosphere, PEcAn.logger, PEcAn.remote, PEcAn.utils - -The Date field is over a month old. * checking package namespace information ... OK * checking package dependencies ... OK * checking if this is a source package ... OK @@ -69,14 +30,8 @@ The Date field is over a month old. * checking installed package size ... OK * checking package directory ... OK * checking for future file timestamps ... OK -* checking DESCRIPTION meta-information ... NOTE -Author field differs from that derived from Authors@R - Author: ‘Tony Gardella’ - Authors@R: ‘Tony Gardella [aut, cre], University of Illinois, NCSA [cph]’ - -* checking top-level files ... NOTE -Non-standard files/directories found at top level: - ‘Dockerfile’ ‘model_info.json’ +* checking DESCRIPTION meta-information ... OK +* checking top-level files ... OK * checking for left-over files ... OK * checking index information ... OK * checking package subdirectories ... OK @@ -107,20 +62,7 @@ Undefined global functions or variables: * checking Rd cross-references ... OK * checking for missing documentation entries ... OK * checking for code/documentation mismatches ... OK -* checking Rd \usage sections ... WARNING -Undocumented arguments in documentation object 'met2model.MAESPA' - ‘...’ - -Undocumented arguments in documentation object 'write.config.MAESPA' - ‘trait.values’ -Documented arguments not in \usage in documentation object 'write.config.MAESPA': - ‘trait.samples’ - -Functions with \usage entries need to have the appropriate \alias -entries, and all their arguments documented. -The \usage entries must correspond to syntactically valid R code. -See chapter ‘Writing R documentation files’ in the ‘Writing R -Extensions’ manual. +* checking Rd \usage sections ... OK * checking Rd contents ... OK * checking for unstated dependencies in examples ... OK * checking examples ... NONE diff --git a/models/maespa/tests/testthat.R b/models/maespa/tests/testthat.R index f9eec820a52..78aacd1ba29 100755 --- a/models/maespa/tests/testthat.R +++ b/models/maespa/tests/testthat.R @@ -1,8 +1,3 @@ -# ------------------------------------------------------------------------------- Copyright (c) 2012 -# University of Illinois, NCSA. All rights reserved. 
This program and the accompanying materials are -# made available under the terms of the University of Illinois/NCSA Open Source License which -# accompanies this distribution, and is available at http://opensource.ncsa.illinois.edu/license.html -# ------------------------------------------------------------------------------- library(testthat) library(PEcAn.utils) diff --git a/models/preles/DESCRIPTION b/models/preles/DESCRIPTION index 69f31e3add3..125a9ffddbe 100644 --- a/models/preles/DESCRIPTION +++ b/models/preles/DESCRIPTION @@ -1,15 +1,12 @@ Package: PEcAn.PRELES Type: Package Title: PEcAn Package for Integration of the PRELES Model -Version: 1.7.2 -Date: 2021-10-04 +Version: 1.7.3.9000 Authors@R: c(person("Mike", "Dietze", role = c("aut"), email = "dietze@bu.edu"), person("Tony", "Gardella", role = c("aut", "cre"), email = "tonygard@bu.edu"), person("University of Illinois, NCSA", role = c("cph"))) -Author: Tony Gardella, Mike Dietze -Maintainer: Tony Gardella Description: This module provides functions to run the PREdict Light use efficiency Evapotranspiration and Soil moisture (PRELES) model on the PEcAn project. The Predictive Ecosystem Carbon Analyzer (PEcAn) is a scientific @@ -17,8 +14,6 @@ Description: This module provides functions to run the PREdict Light use parameterization,execution, and analysis. The goal of PECAn is to streamline the interaction between data and models, and to improve the efficacy of scientific investigation. -Depends: - PEcAn.utils Imports: PEcAn.logger, lubridate (>= 1.6.0), @@ -33,7 +28,5 @@ Remotes: OS_type: unix License: BSD_3_clause + file LICENSE Copyright: Authors -LazyLoad: yes -LazyData: FALSE Encoding: UTF-8 -RoxygenNote: 7.2.3 +RoxygenNote: 7.3.2 diff --git a/models/preles/LICENSE b/models/preles/LICENSE index 5a9e44128f1..09ef35a60b4 100644 --- a/models/preles/LICENSE +++ b/models/preles/LICENSE @@ -1,34 +1,3 @@ -## This is the master copy of the PEcAn License - -University of Illinois/NCSA Open Source License - -Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - -PEcAn project -www.pecanproject.org - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal with the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -- Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. -- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. -- Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - +YEAR: 2024 +COPYRIGHT HOLDER: PEcAn Project +ORGANIZATION: PEcAn Project, authors affiliations diff --git a/models/preles/NEWS.md b/models/preles/NEWS.md new file mode 100644 index 00000000000..460c0509e66 --- /dev/null +++ b/models/preles/NEWS.md @@ -0,0 +1,10 @@ +# PEcAn.PRELES 1.7.3.9000 + +## License change +* PEcAn.PRELES is now distributed under the BSD three-clause license instead of the NCSA Open Source license. + + +# PEcAn.PRELES 1.7.1 + +* All changes in 1.7.1 and earlier were recorded in a single file for all of the PEcAn packages; please see +https://github.com/PecanProject/pecan/blob/v1.7.1/CHANGELOG.md for details. diff --git a/models/preles/R/runPRELES.jobsh.R b/models/preles/R/runPRELES.jobsh.R index deb925a4b3a..db3dbc8f46d 100644 --- a/models/preles/R/runPRELES.jobsh.R +++ b/models/preles/R/runPRELES.jobsh.R @@ -1,26 +1,22 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -#--------------------------------------------------------------------------------------------------# -##' @title Function to process ncdf file, run PRELES model, and convert output .nc file in CF standard -##' @param in.path location on disk where inputs are stored -##' @param in.prefix prefix of input and output files -##' @param outdir Location of PRELES model output -##' @param start_date Start time of the simulation -##' @param end_date End time of the simulation -##' @export -##' @author Tony Gardella, Michael Dietze +#' Process ncdf file, run PRELES model, and convert output .nc file in CF standard +#' +#' @param met.file base name for yearly nc files containing met data. +#' Example: `met.file="somefile"` matches somefile.2004.nc, somefile.2005.nc, etc. +#' @param outdir Location of PRELES model output +#' @param parameters An R data file containing parameter values. +#' Must be an Rda file written via `save()`, and must define an object named +#' `trait.values` +#' @param sitelat,sitelon Latitude and longitude of site in decimal degrees +#' @param start.date,end.date Start and end time of the simulation +#' +#' @export +#' @author Tony Gardella, Michael Dietze runPRELES.jobsh <- function(met.file, outdir, parameters, sitelat, sitelon, start.date, end.date) { - if(!require("Rpreles")){ - logger.severe("The Rpreles package is not installed. - Please execute- devtools::install_github('MikkoPeltoniemi/Rpreles')") + if (!requireNamespace("Rpreles", quietly = TRUE)) { + PEcAn.logger::logger.severe( + "The Rpreles package is not installed. 
+      Please execute devtools::install_github('MikkoPeltoniemi/Rpreles')")
   }
 
   # Process start and end dates
@@ -120,10 +116,10 @@ runPRELES.jobsh <- function(met.file, outdir, parameters, sitelat, sitelon, star
 #30.tsumcrit	-999	fPheno_budburst_Tsum, 134 birch
 
   ## Replace default with sampled parameters
-  load(parameters)
-  params <- data.frame(trait.values)
-  colnames <- c(names(trait.values[[1]]))
-  colnames(params) <- colnames
+  param_objs <- new.env()
+  load(parameters, envir = param_objs)
+  params <- data.frame(param_objs$trait.values)
+  colnames(params) <- names(param_objs$trait.values[[1]])
 
   param.def[5] <- as.numeric(params["bGPP"])
   param.def[9] <- as.numeric(params["kGPP"])
diff --git a/models/preles/R/version.R b/models/preles/R/version.R
new file mode 100644
index 00000000000..0e58d885272
--- /dev/null
+++ b/models/preles/R/version.R
@@ -0,0 +1,3 @@
+# Set at package install time, used by pecan.all::pecan_version()
+# to identify development versions of packages
+.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown")
diff --git a/models/preles/R/write.config.PRELES.R b/models/preles/R/write.config.PRELES.R
index fb31d2e705d..42e8433c241 100644
--- a/models/preles/R/write.config.PRELES.R
+++ b/models/preles/R/write.config.PRELES.R
@@ -1,19 +1,9 @@
-#-------------------------------------------------------------------------------
-# Copyright (c) 2012 University of Illinois, NCSA.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the
-# University of Illinois/NCSA Open Source License
-# which accompanies this distribution, and is available at
-# http://opensource.ncsa.illinois.edu/license.html
-#-------------------------------------------------------------------------------
-
-##-------------------------------------------------------------------------------------------------#
 ##' Writes a PRELES config file.
 ##'
 ##' @name write.config.PRELES
 ##' @title Write PRELES configuration files
 ##' @param defaults list of defaults to process
-##' @param trait.samples vector of samples for a given trait
+##' @param trait.values vector of samples for a given trait
 ##' @param settings list of settings from pecan settings file
 ##' @param run.id id of run
 ##' @return configuration file for PRELES for given run
diff --git a/models/preles/man/runPRELES.jobsh.Rd b/models/preles/man/runPRELES.jobsh.Rd
index 6588c3971d2..3890dca8f4b 100644
--- a/models/preles/man/runPRELES.jobsh.Rd
+++ b/models/preles/man/runPRELES.jobsh.Rd
@@ -2,7 +2,7 @@
 % Please edit documentation in R/runPRELES.jobsh.R
 \name{runPRELES.jobsh}
 \alias{runPRELES.jobsh}
-\title{Function to process ncdf file, run PRELES model, and convert output .nc file in CF standard}
+\title{Process ncdf file, run PRELES model, and convert output .nc file in CF standard}
 \usage{
 runPRELES.jobsh(
   met.file,
   outdir,
   parameters,
   sitelat,
   sitelon,
   start.date,
   end.date
 )
 }
@@ -15,18 +15,21 @@ runPRELES.jobsh(
 \arguments{
-\item{outdir}{Location of PRELES model output}
+\item{met.file}{base name for yearly nc files containing met data.
+Example: `met.file="somefile"` matches somefile.2004.nc, somefile.2005.nc, etc.}
 
-\item{in.path}{location on disk where inputs are stored}
+\item{outdir}{Location of PRELES model output}
 
-\item{in.prefix}{prefix of input and output files}
+\item{parameters}{An R data file containing parameter values.
+Must be an Rda file written via `save()`, and must define an object named +`trait.values`} -\item{start_date}{Start time of the simulation} +\item{sitelat, sitelon}{Latitude and longitude of site in decimal degrees} -\item{end_date}{End time of the simulation} +\item{start.date, end.date}{Start and end time of the simulation} } \description{ -Function to process ncdf file, run PRELES model, and convert output .nc file in CF standard +Process ncdf file, run PRELES model, and convert output .nc file in CF standard } \author{ Tony Gardella, Michael Dietze diff --git a/models/preles/man/write.config.PRELES.Rd b/models/preles/man/write.config.PRELES.Rd index ae20b961c74..a3068c26cf4 100644 --- a/models/preles/man/write.config.PRELES.Rd +++ b/models/preles/man/write.config.PRELES.Rd @@ -9,11 +9,11 @@ write.config.PRELES(defaults, trait.values, settings, run.id) \arguments{ \item{defaults}{list of defaults to process} +\item{trait.values}{vector of samples for a given trait} + \item{settings}{list of settings from pecan settings file} \item{run.id}{id of run} - -\item{trait.samples}{vector of samples for a given trait} } \value{ configuration file for PRELES for given run diff --git a/models/preles/tests/Rcheck_reference.log b/models/preles/tests/Rcheck_reference.log index a82d631af9a..a7c0c05acb7 100644 --- a/models/preles/tests/Rcheck_reference.log +++ b/models/preles/tests/Rcheck_reference.log @@ -12,43 +12,6 @@ Maintainer: ‘Tony Gardella ’ New submission -License components with restrictions and base license permitting such: - BSD_3_clause + file LICENSE -File 'LICENSE': - ## This is the master copy of the PEcAn License - - University of Illinois/NCSA Open Source License - - Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - - PEcAn project - www.pecanproject.org - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal with the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. - - Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR - ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. 
- Unknown, possibly misspelled, fields in DESCRIPTION: ‘Remotes’ @@ -56,8 +19,6 @@ Strong dependencies not in mainstream repositories: PEcAn.utils, PEcAn.logger, PEcAn.data.atmosphere Suggests or Enhances not in mainstream repositories: Rpreles - -The Date field is over a month old. * checking package namespace information ... OK * checking package dependencies ... OK * checking if this is a source package ... OK @@ -71,14 +32,7 @@ The Date field is over a month old. * checking installed package size ... OK * checking package directory ... OK * checking for future file timestamps ... OK -* checking DESCRIPTION meta-information ... NOTE -Author field differs from that derived from Authors@R - Author: ‘Tony Gardella, Mike Dietze’ - Authors@R: ‘Mike Dietze [aut], Tony Gardella [aut, cre], University of Illinois, NCSA [cph]’ - -Package listed in more than one of Depends, Imports, Suggests, Enhances: - ‘PEcAn.utils’ -A package should be listed in only one of these fields. +* checking DESCRIPTION meta-information ... OK * checking top-level files ... OK * checking for left-over files ... OK * checking index information ... OK @@ -92,44 +46,18 @@ A package should be listed in only one of these fields. * checking whether the namespace can be unloaded cleanly ... OK * checking loading without being on the library search path ... OK * checking use of S3 registration ... OK -* checking dependencies in R code ... NOTE -'library' or 'require' call to ‘Rpreles’ in package code. - Please use :: or requireNamespace() instead. - See section 'Suggested packages' in the 'Writing R Extensions' manual. -Package in Depends field not imported from: ‘PEcAn.utils’ - These packages need to be imported from (in the NAMESPACE file) - for when this namespace is loaded but not attached. +* checking dependencies in R code ... OK * checking S3 generic/method consistency ... OK * checking replacement functions ... OK * checking foreign function calls ... OK -* checking R code for possible problems ... NOTE -runPRELES.jobsh: no visible global function definition for - ‘logger.severe’ -runPRELES.jobsh: no visible binding for global variable ‘trait.values’ -Undefined global functions or variables: - logger.severe trait.values +* checking R code for possible problems ... OK * checking Rd files ... OK * checking Rd metadata ... OK * checking Rd line widths ... OK * checking Rd cross-references ... OK * checking for missing documentation entries ... OK * checking for code/documentation mismatches ... OK -* checking Rd \usage sections ... WARNING -Undocumented arguments in documentation object 'runPRELES.jobsh' - ‘met.file’ ‘parameters’ ‘sitelat’ ‘sitelon’ ‘start.date’ ‘end.date’ -Documented arguments not in \usage in documentation object 'runPRELES.jobsh': - ‘in.path’ ‘in.prefix’ ‘start_date’ ‘end_date’ - -Undocumented arguments in documentation object 'write.config.PRELES' - ‘trait.values’ -Documented arguments not in \usage in documentation object 'write.config.PRELES': - ‘trait.samples’ - -Functions with \usage entries need to have the appropriate \alias -entries, and all their arguments documented. -The \usage entries must correspond to syntactically valid R code. -See chapter ‘Writing R documentation files’ in the ‘Writing R -Extensions’ manual. +* checking Rd \usage sections ... OK * checking Rd contents ... OK * checking for unstated dependencies in examples ... OK * checking examples ... NONE @@ -139,4 +67,4 @@ Extensions’ manual. * checking for detritus in the temp directory ... 
OK * DONE -Status: 2 WARNINGs, 3 NOTEs +Status: 1 WARNING, 0 NOTEs diff --git a/models/preles/tests/testthat.R b/models/preles/tests/testthat.R index 73f5a5f9fb8..4d3ef970446 100644 --- a/models/preles/tests/testthat.R +++ b/models/preles/tests/testthat.R @@ -1,11 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- library(testthat) library(PEcAn.utils) diff --git a/models/sibcasa/DESCRIPTION b/models/sibcasa/DESCRIPTION index bd89456a341..0c3bae6faed 100644 --- a/models/sibcasa/DESCRIPTION +++ b/models/sibcasa/DESCRIPTION @@ -1,8 +1,7 @@ Package: PEcAn.SIBCASA Type: Package Title: PEcAn Package for Integration of the SiBCASA Model -Version: 0.0.1 -Date: 2021-10-07 +Version: 0.0.1.9000 Authors@R: c(person("Rob", "Kooper", role = "cre", email = "kooper@illinois.edu"), person("Tony", "Gardella", role = c("aut"), @@ -15,8 +14,7 @@ Description: This module provides functions to link (SiBCASA) to PEcAn. It is a work in progress and is not yet fully functional. Imports: ncdf4, - PEcAn.logger, - PEcAn.utils (>= 1.4.8) + PEcAn.logger Suggests: testthat (>= 3.0.0), withr @@ -26,4 +24,4 @@ License: BSD_3_clause + file LICENSE Copyright: Authors LazyData: TRUE Encoding: UTF-8 -RoxygenNote: 7.2.3 +RoxygenNote: 7.3.2 diff --git a/models/sibcasa/LICENSE b/models/sibcasa/LICENSE index 5a9e44128f1..09ef35a60b4 100644 --- a/models/sibcasa/LICENSE +++ b/models/sibcasa/LICENSE @@ -1,34 +1,3 @@ -## This is the master copy of the PEcAn License - -University of Illinois/NCSA Open Source License - -Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - -PEcAn project -www.pecanproject.org - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal with the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -- Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. -- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. -- Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. 
-
+YEAR: 2024
+COPYRIGHT HOLDER: PEcAn Project
+ORGANIZATION: PEcAn Project, authors affiliations
diff --git a/models/sibcasa/NEWS.md b/models/sibcasa/NEWS.md
new file mode 100644
index 00000000000..1b1812106da
--- /dev/null
+++ b/models/sibcasa/NEWS.md
@@ -0,0 +1,8 @@
+# PEcAn.SIBCASA 0.0.1.9000
+
+## License change
+* PEcAn.SIBCASA is now distributed under the BSD three-clause license instead of the NCSA Open Source license.
+
+# PEcAn.SIBCASA 0.0.1
+
+First unstable public release. This package is experimental.
\ No newline at end of file
diff --git a/models/sibcasa/R/met2model.SIBCASA.R b/models/sibcasa/R/met2model.SIBCASA.R
index 65ba5c7725d..5dc81323f70 100644
--- a/models/sibcasa/R/met2model.SIBCASA.R
+++ b/models/sibcasa/R/met2model.SIBCASA.R
@@ -1,11 +1,3 @@
-#-------------------------------------------------------------------------------
-# Copyright (c) 2012 University of Illinois, NCSA.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the
-# University of Illinois/NCSA Open Source License
-# which accompanies this distribution, and is available at
-# http://opensource.ncsa.illinois.edu/license.html
-#-------------------------------------------------------------------------------
 
 #' Write SIBCASA met files
 #'
@@ -24,7 +16,7 @@ met2model.SIBCASA <- function(in.path, in.prefix, outfolder, overwrite = FALSE)
   PEcAn.logger::logger.severe("NOT IMPLEMENTED")
 
   # Please follow the PEcAn style guide:
-  # https://pecanproject.github.io/pecan-documentation/master/coding-style.html
+  # https://pecanproject.github.io/pecan-documentation/latest/coding-style.html
 
   # Note that `library()` calls should _never_ appear here; instead, put
   # packages dependencies in the DESCRIPTION file, under "Imports:".
diff --git a/models/sibcasa/R/model2netcdf.SIBCASA.R b/models/sibcasa/R/model2netcdf.SIBCASA.R
index 0c482fffd4d..236b705599c 100644
--- a/models/sibcasa/R/model2netcdf.SIBCASA.R
+++ b/models/sibcasa/R/model2netcdf.SIBCASA.R
@@ -1,11 +1,3 @@
-#-------------------------------------------------------------------------------
-# Copyright (c) 2012 University of Illinois, NCSA.
-# All rights reserved.
This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- #-------------------------------------------------------------------------------------------------# #' Writes a SIBCASA config file. diff --git a/models/sibcasa/tests/Rcheck_reference.log b/models/sibcasa/tests/Rcheck_reference.log index af5be9bc108..8ce4873d212 100644 --- a/models/sibcasa/tests/Rcheck_reference.log +++ b/models/sibcasa/tests/Rcheck_reference.log @@ -12,47 +12,8 @@ Maintainer: ‘Rob Kooper ’ New submission -License components with restrictions and base license permitting such: - BSD_3_clause + file LICENSE -File 'LICENSE': - ## This is the master copy of the PEcAn License - - University of Illinois/NCSA Open Source License - - Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - - PEcAn project - www.pecanproject.org - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal with the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. - - Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR - ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - Strong dependencies not in mainstream repositories: PEcAn.logger, PEcAn.utils - -The Date field is over a month old. * checking package namespace information ... OK * checking package dependencies ... OK * checking if this is a source package ... OK @@ -80,9 +41,7 @@ The Date field is over a month old. * checking whether the namespace can be unloaded cleanly ... OK * checking loading without being on the library search path ... OK * checking use of S3 registration ... OK -* checking dependencies in R code ... NOTE -Namespace in Imports field not imported from: ‘PEcAn.utils’ - All declared Imports should be used. +* checking dependencies in R code ... OK * checking S3 generic/method consistency ... OK * checking replacement functions ... OK * checking foreign function calls ... OK @@ -109,4 +68,4 @@ Namespace in Imports field not imported from: ‘PEcAn.utils’ * checking for detritus in the temp directory ... 
OK * DONE -Status: 1 WARNING, 1 NOTE +Status: 1 WARNING, 0 NOTEs diff --git a/models/sibcasa/tests/testthat.R b/models/sibcasa/tests/testthat.R index d93798b4ffe..b82fe6345ad 100644 --- a/models/sibcasa/tests/testthat.R +++ b/models/sibcasa/tests/testthat.R @@ -1,13 +1,4 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- library(testthat) -library(PEcAn.utils) PEcAn.logger::logger.setQuitOnSevere(FALSE) #test_check("PEcAn.ModelName") diff --git a/models/sipnet/.Rbuildignore b/models/sipnet/.Rbuildignore new file mode 100644 index 00000000000..2d28facae40 --- /dev/null +++ b/models/sipnet/.Rbuildignore @@ -0,0 +1,2 @@ +Dockerfile +model_info.json diff --git a/models/sipnet/DESCRIPTION b/models/sipnet/DESCRIPTION index 8b677428f80..0a79488bbe5 100644 --- a/models/sipnet/DESCRIPTION +++ b/models/sipnet/DESCRIPTION @@ -1,26 +1,25 @@ Package: PEcAn.SIPNET Type: Package Title: PEcAn Functions Used for Ecological Forecasts and Reanalysis -Version: 1.7.2 -Date: 2021-10-04 +Version: 1.8.0.9000 Authors@R: c(person("Mike", "Dietze", role = c("aut", "cre"), email = "dietze@bu.edu"), person("University of Illinois, NCSA", role = c("cph"))) -Author: Mike Dietze -Maintainer: Mike Dietze Description: The Predictive Ecosystem Carbon Analyzer (PEcAn) is a scientific workflow management tool that is designed to simplify the management of model parameterization, execution, and analysis. The goal of PECAn is to streamline the interaction between data and models, and to improve the efficacy of scientific investigation. -Depends: - PEcAn.data.atmosphere Imports: + dplyr, + lubridate (>= 1.6.0), + ncdf4 (>= 1.15), + PEcAn.data.atmosphere, + PEcAn.data.land, PEcAn.logger, PEcAn.remote, PEcAn.utils, - lubridate (>= 1.6.0), - ncdf4 (>= 1.15), + stats Suggests: coda, testthat (>= 1.0.2) @@ -28,8 +27,5 @@ SystemRequirements: SIPNET ecosystem model OS_type: unix License: BSD_3_clause + file LICENSE Copyright: Authors -LazyLoad: yes -LazyData: FALSE Encoding: UTF-8 -Require: -RoxygenNote: 7.2.3 +RoxygenNote: 7.3.2 diff --git a/models/sipnet/Dockerfile b/models/sipnet/Dockerfile index 1b0fc75725e..d69e845dd4d 100644 --- a/models/sipnet/Dockerfile +++ b/models/sipnet/Dockerfile @@ -4,7 +4,7 @@ ARG IMAGE_VERSION="latest" # ---------------------------------------------------------------------- # BUILD SIPNET BINARY # ---------------------------------------------------------------------- -FROM pecan/models:${IMAGE_VERSION} as model-binary +FROM pecan/models:${IMAGE_VERSION} AS model-binary # Some variables that can be used to set control the docker build ARG MODEL_VERSION=git diff --git a/models/sipnet/LICENSE b/models/sipnet/LICENSE index 5a9e44128f1..09ef35a60b4 100644 --- a/models/sipnet/LICENSE +++ b/models/sipnet/LICENSE @@ -1,34 +1,3 @@ -## This is the master copy of the PEcAn License - -University of Illinois/NCSA Open Source License - -Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. 
- -PEcAn project -www.pecanproject.org - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal with the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -- Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. -- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. -- Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - +YEAR: 2024 +COPYRIGHT HOLDER: PEcAn Project +ORGANIZATION: PEcAn Project, authors affiliations diff --git a/models/sipnet/NAMESPACE b/models/sipnet/NAMESPACE index cdef11b5367..c1a2cf4a98e 100644 --- a/models/sipnet/NAMESPACE +++ b/models/sipnet/NAMESPACE @@ -11,3 +11,4 @@ export(split_inputs.SIPNET) export(veg2model.SIPNET) export(write.config.SIPNET) export(write_restart.SIPNET) +importFrom(dplyr,"%>%") diff --git a/models/sipnet/NEWS.md b/models/sipnet/NEWS.md new file mode 100644 index 00000000000..e79578461af --- /dev/null +++ b/models/sipnet/NEWS.md @@ -0,0 +1,13 @@ +# PEcAn.SIPNET 1.8.0.9000 + +## License change +* PEcAn.SIPNET is now distributed under the BSD three-clause license instead of the NCSA Open Source license. + +# PEcAn.SIPNET 1.8.0 + +* Support for all Sipnet variables in read_restart and write_restart, for integration with state data assimilation workflows + +# PEcAn.SIPNET 1.7.1 + +* All changes in 1.7.1 and earlier were recorded in a single file for all of the PEcAn packages; please see +https://github.com/PecanProject/pecan/blob/v1.7.1/CHANGELOG.md for details. diff --git a/models/sipnet/R/met2model.SIPNET.R b/models/sipnet/R/met2model.SIPNET.R index c4369cba3b5..d1c57084318 100644 --- a/models/sipnet/R/met2model.SIPNET.R +++ b/models/sipnet/R/met2model.SIPNET.R @@ -1,13 +1,3 @@ - -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - # R Code to convert NetCDF CF met files into SIPNET met files ## If files already exist in 'Outfolder', the default function is NOT to overwrite them and only @@ -26,6 +16,7 @@ ##' @param overwrite should existing files be overwritten ##' @param verbose should the function be very verbose ##' @param year.fragment the function should ignore whether or not the data is stored as a set of complete years (such as for forecasts). +##' @param ... Additional arguments, currently ignored ##' @author Luke Dramko, Michael Dietze, Alexey Shiklomanov, Rob Kooper met2model.SIPNET <- function(in.path, in.prefix, outfolder, start_date, end_date, overwrite = FALSE, verbose = FALSE, year.fragment = FALSE, ...) { @@ -42,7 +33,7 @@ met2model.SIPNET <- function(in.path, in.prefix, outfolder, start_date, end_date PEcAn.logger::logger.severe(paste0("No files found matching ", in.prefix, "; cannot process data.")) } - # This function is supposed to process netcdf files, so we'll search for files the the extension .nc and use those first. + # This function is supposed to process netcdf files, so we'll search for files with the extension .nc and use those first. nc_file = grep("\\.nc$", matching_files) if (length(nc_file) > 0) { if (grepl("\\.nc$", in.prefix)) { @@ -54,7 +45,7 @@ met2model.SIPNET <- function(in.path, in.prefix, outfolder, start_date, end_date } else { # no .nc files found... it could be that the extension was left off, or some other problem PEcAn.logger::logger.warn("No files found with extension '.nc'. Using the first file in the list below:") PEcAn.logger::logger.warn(matching_files) - in.prefix <- matching_files[i] + in.prefix <- matching_files[1] } } else { # Default behavior out.file <- paste(in.prefix, strptime(start_date, "%Y-%m-%d"), @@ -164,7 +155,7 @@ met2model.SIPNET <- function(in.path, in.prefix, outfolder, start_date, end_date tau <- 15 * tstep filt <- exp(-(1:length(Tair)) / tau) filt <- (filt / sum(filt)) - soilT <- convolve(Tair, filt) + soilT <- stats::convolve(Tair, filt) soilT <- PEcAn.utils::ud_convert(soilT, "K", "degC") PEcAn.logger::logger.info("soil_temperature absent; soilT approximated from Tair") } else { diff --git a/models/sipnet/R/model2netcdf.SIPNET.R b/models/sipnet/R/model2netcdf.SIPNET.R index 33ca6480dc7..438a5c43de3 100644 --- a/models/sipnet/R/model2netcdf.SIPNET.R +++ b/models/sipnet/R/model2netcdf.SIPNET.R @@ -1,12 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - #' Merge multiple NetCDF files into one #' #' @param files \code{character}. List of filepaths, which should lead to NetCDF files. @@ -86,8 +77,6 @@ sipnet2datetime <- function(sipnet_tval, base_year, base_month = 1, ##' Convert SIPNET output to netCDF ##' ##' Converts all output contained in a folder to netCDF. 
-##' @name model2netcdf.SIPNET -##' @title Function to convert SIPNET model output to standard netCDF format ##' ##' @param outdir Location of SIPNET model output ##' @param sitelat Latitude of the site @@ -98,7 +87,7 @@ sipnet2datetime <- function(sipnet_tval, base_year, base_month = 1, ##' @param overwrite Flag for overwriting nc files or not ##' @param conflict Flag for dealing with conflicted nc files, if T we then will merge those, if F we will jump to the next. ##' @param prefix prefix to read the output files -##' @param delete.raw Flag to remove sipnet.out files, FALSE = do not remove files TRUE = remove files +##' @param delete.raw logical: remove sipnet.out files after converting? ##' ##' @export ##' @author Shawn Serbin, Michael Dietze @@ -140,7 +129,7 @@ model2netcdf.SIPNET <- function(outdir, sitelat, sitelon, start_date, end_date, for (y in year_seq) { #initialize the conflicted as FALSE conflicted <- FALSE - + conflict <- TRUE #conflict is set to TRUE to enable the rename of yearly nc file for merging SDA results with sub-annual data #if we have conflicts on this file. if (file.exists(file.path(outdir, paste(y, "nc", sep = "."))) & overwrite == FALSE & conflict == FALSE) { next @@ -296,13 +285,19 @@ model2netcdf.SIPNET <- function(outdir, sitelat, sitelon, start_date, end_date, close(varfile) ncdf4::nc_close(nc) - #merge nc files + #merge nc files of the same year together to enable the assimilation of sub-annual data if(file.exists(file.path(outdir, "previous.nc"))){ files <- c(file.path(outdir, "previous.nc"), file.path(outdir, "current.nc")) }else{ files <- file.path(outdir, "current.nc") } mergeNC(files = files, outfile = file.path(outdir, paste(y, "nc", sep = "."))) + #The command "cdo" in mergeNC will automatically rename "time_bounds" to "time_bnds". However, "time_bounds" is used + #in read_restart codes later. So we need to read the new NetCDF file and convert the variable name back. + nc<- ncdf4::nc_open(file.path(outdir, paste(y, "nc", sep = ".")),write=TRUE) + nc<-ncdf4::ncvar_rename(nc,"time_bnds","time_bounds") + ncdf4::ncatt_put(nc, "time", "bounds","time_bounds", prec=NA) + ncdf4::nc_close(nc) unlink(files, recursive = T) }else{ nc <- ncdf4::nc_create(file.path(outdir, paste(y, "nc", sep = ".")), nc_var) @@ -323,4 +318,4 @@ model2netcdf.SIPNET <- function(outdir, sitelat, sitelon, start_date, end_date, } } # model2netcdf.SIPNET #--------------------------------------------------------------------------------------------------# -### EOF \ No newline at end of file +### EOF diff --git a/models/sipnet/R/read_restart.SIPNET.R b/models/sipnet/R/read_restart.SIPNET.R index adda2f55716..2260c71a476 100755 --- a/models/sipnet/R/read_restart.SIPNET.R +++ b/models/sipnet/R/read_restart.SIPNET.R @@ -1,12 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - ##' @title Read restart function for SDA with SIPNET ##' ##' @author Ann Raiho \email{araiho@@nd.edu} diff --git a/models/sipnet/R/sample.IC.SIPNET.R b/models/sipnet/R/sample.IC.SIPNET.R index d1578c4df33..540a7bf5006 100644 --- a/models/sipnet/R/sample.IC.SIPNET.R +++ b/models/sipnet/R/sample.IC.SIPNET.R @@ -1,31 +1,21 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -## samples intial conditions for SIPNET -##' @title sample.IC.SIPNET -##' @name sample.IC.SIPNET -##' @author Mike Dietze and Ann Raiho -##' -##' @param ne number of ensembles -##' @param state state variables you want to pull -##' @description samples intial conditions for SIPNET -##' -##' @return IC matrix of initial conditions -##' @export -##' +#' Sample initial conditions for SIPNET +#' +#' @author Mike Dietze and Ann Raiho +#' +#' @param ne number of ensembles +#' @param state state variables you want to pull +#' @param year year to pull from +#' +#' @return IC matrix of initial conditions +#' @export +#' sample.IC.SIPNET <- function(ne, state, year = 1) { ## Mg C / ha / yr GWBI ## no conversion needed because SIPNET doesn't take GWBI as IC anyway GWBI <- ifelse(rep("GWBI" %in% names(state), ne), state$GWBI[sample.int(length(state$GWBI), ne)], ## unit MgC ha-1 yr-1 - runif(ne, 0, 10)) ## prior + stats::runif(ne, 0, 10)) ## prior # g C * m-2 ground area in wood (above-ground + roots) Mgha2gm <- (1000000) / (10000) # these unit conversions are for testing @@ -33,12 +23,12 @@ sample.IC.SIPNET <- function(ne, state, year = 1) { # using MgC ha-1 yr-1 for NPP in SDA and also brought back AbvGrndWood to MgC ha-1 for sanity reasons AbvGrndWood <- ifelse(rep("AbvGrndWood" %in% names(state), ne), PEcAn.utils::ud_convert(state$AbvGrndWood[sample.int(length(state$AbvGrndWood), ne)], "Mg/ha", "g/m^2"), - runif(ne, 700, 15000)) ## prior + stats::runif(ne, 700, 15000)) ## prior # sipnet accepts a plantWoodC pool that is above-ground + roots # instead of roots having their own state, we'll pass around fractions to update them deterministically - fine_root_carbon_content <- runif(ne, 100, 1000) - coarse_root_carbon_content <- runif(ne, 200, 2000) + fine_root_carbon_content <- stats::runif(ne, 100, 1000) + coarse_root_carbon_content <- stats::runif(ne, 200, 2000) wood_total_C <- AbvGrndWood + fine_root_carbon_content + coarse_root_carbon_content @@ -48,39 +38,39 @@ sample.IC.SIPNET <- function(ne, state, year = 1) { # initial leaf area, m2 leaves * m-2 ground area (multiply by leafCSpWt to ## get initial plant leaf C) - lai <- ifelse(rep("LAI" %in% names(state), ne), - state$LAI[1, sample.int(ncol(state$LAI), ne), year], - runif(ne, 0, 7)) ## prior - + lai <- ifelse(rep("LAI" %in% names(state), ne), + state$LAI[1, sample.int(ncol(state$LAI), ne), year], + 
stats::runif(ne, 0, 7)) ## prior + ## g C * m-2 ground area - litter <- ifelse(rep("litter" %in% names(state), ne), - state$litter[1, sample.int(ncol(state$litter), ne), year], - runif(ne, 130, 1200)) ## prior - + litter <- ifelse(rep("litter" %in% names(state), ne), + state$litter[1, sample.int(ncol(state$litter), ne), year], + stats::runif(ne, 130, 1200)) ## prior + ## g C * m-2 ground area - soil <- ifelse(rep("soil" %in% names(state), ne), - state$soil[1, sample.int(ncol(state$soil), ne), year], - runif(ne, 1200, 2000)) ## prior - + soil <- ifelse(rep("soil" %in% names(state), ne), + state$soil[1, sample.int(ncol(state$soil), ne), year], + stats::runif(ne, 1200, 2000)) ## prior + ## unitless: fraction of litterWHC - litterWFrac <- ifelse(rep("litterW" %in% names(state), ne), - state$litterW[1, sample.int(ncol(state$litterW), ne), year], - runif(ne)) ## prior - + litterWFrac <- ifelse(rep("litterW" %in% names(state), ne), + state$litterW[1, sample.int(ncol(state$litterW), ne), year], + stats::runif(ne)) ## prior + ## unitless: fraction of soilWHC - soilWFrac <- ifelse(rep("soilW" %in% names(state), ne), + soilWFrac <- ifelse(rep("soilW" %in% names(state), ne), state$soilW[1, sample.int(ncol(state$soilW), ne), year], - runif(ne)) ## prior - + stats::runif(ne)) ## prior + ## cm water equiv - snow <- ifelse(rep("snow" %in% names(state), ne), - state$snow[1, sample.int(ncol(state$snow), ne), year], - runif(ne, 0, 2000)) ## prior - - microbe <- ifelse(rep("microbe" %in% names(state), ne), - state$microbe[1, sample.int(ncol(state$microbe), ne), year], - runif(ne, 0.02, 1)) ## prior - + snow <- ifelse(rep("snow" %in% names(state), ne), + state$snow[1, sample.int(ncol(state$snow), ne), year], + stats::runif(ne, 0, 2000)) ## prior + + microbe <- ifelse(rep("microbe" %in% names(state), ne), + state$microbe[1, sample.int(ncol(state$microbe), ne), year], + stats::runif(ne, 0.02, 1)) ## prior + return(data.frame(GWBI, AbvGrndWood, abvGrndWoodFrac, coarseRootFrac, fineRootFrac, lai, litter, soil, litterWFrac, soilWFrac, snow, microbe)) } # sample.IC.SIPNET diff --git a/models/sipnet/R/split_inputs.SIPNET.R b/models/sipnet/R/split_inputs.SIPNET.R index cd674647817..4bc6de7dfa6 100644 --- a/models/sipnet/R/split_inputs.SIPNET.R +++ b/models/sipnet/R/split_inputs.SIPNET.R @@ -1,12 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. 
This program and the accompanying materials
-# are made available under the terms of the
-# University of Illinois/NCSA Open Source License
-# which accompanies this distribution, and is available at
-# http://opensource.ncsa.illinois.edu/license.html
-#-------------------------------------------------------------------------------
-
 ## split clim file into smaller time units to use in KF
 ##' @title split_inputs.SIPNET
 ##' @name split_inputs.SIPNET
@@ -21,6 +12,8 @@
 ##' @description Splits climate met for SIPNET
 ##'
 ##' @return file split up climate file
+##'
+##' @importFrom dplyr %>%
 ##' @export
 split_inputs.SIPNET <- function(settings, start.time, stop.time, inputs, overwrite = FALSE, outpath = NULL) {
   #### Get met paths
diff --git a/models/sipnet/R/veg2model.SIPNET.R b/models/sipnet/R/veg2model.SIPNET.R
index e57f8106cc1..741e37a6748 100644
--- a/models/sipnet/R/veg2model.SIPNET.R
+++ b/models/sipnet/R/veg2model.SIPNET.R
@@ -1,7 +1,4 @@
-#' veg2model.SIPNET
-#' @name veg2model.SIPNET
-#' @title veg2model.SIPNET
-#'
+#' veg2model.SIPNET
 #'
 #' @param outfolder location to store ncdf files
 #' @param poolinfo object passed from write_ic contains output from cohort2pool function
diff --git a/models/sipnet/R/version.R b/models/sipnet/R/version.R
new file mode 100644
index 00000000000..0e58d885272
--- /dev/null
+++ b/models/sipnet/R/version.R
@@ -0,0 +1,3 @@
+# Set at package install time, used by pecan.all::pecan_version()
+# to identify development versions of packages
+.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown")
diff --git a/models/sipnet/R/write.configs.SIPNET.R b/models/sipnet/R/write.configs.SIPNET.R
index 91c5224c9a0..c294808b7b5 100755
--- a/models/sipnet/R/write.configs.SIPNET.R
+++ b/models/sipnet/R/write.configs.SIPNET.R
@@ -1,16 +1,14 @@
-#-------------------------------------------------------------------------------
-# Copyright (c) 2012 University of Illinois, NCSA.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the
-# University of Illinois/NCSA Open Source License
-# which accompanies this distribution, and is available at
-# http://opensource.ncsa.illinois.edu/license.html
-#-------------------------------------------------------------------------------
-
-#--------------------------------------------------------------------------------------------------#
 ##' Writes a configuration files for your model
 ##' @name write.config.SIPNET
 ##' @title Writes a configuration files for SIPNET model
+##' @param defaults pft
+##' @param trait.values vector of samples for a given trait
+##' @param settings PEcAn settings object
+##' @param run.id run ID
+##' @param inputs list of model inputs
+##' @param IC initial condition
+##' @param restart In case this is a continuation of an old simulation. restart needs to be a list with name tags of runid, inputs, new.params (parameters), new.state (initial condition), ensemble.id (ensemble id), start.time and stop.time. See Details.
+##' @param spinup currently unused, included for compatibility with other models
 ##' @export
 ##' @author Michael Dietze
 write.config.SIPNET <- function(defaults, trait.values, settings, run.id, inputs = NULL, IC = NULL,
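The restart argument documented above is easiest to see as a literal list; a hedged sketch in which every value is a hypothetical placeholder, not a PEcAn default:

restart <- list(
  runid       = "99000000001",                           # hypothetical run ID
  inputs      = list(met = list(path = "sipnet.clim")),  # assumed structure
  new.params  = list(),                                  # parameters, e.g. from an SDA analysis step
  new.state   = data.frame(AbvGrndWood = 9200, LeafC = 120),  # initial condition
  ensemble.id = "1000000001",
  start.time  = as.POSIXct("2005-01-01", tz = "UTC"),
  stop.time   = as.POSIXct("2005-12-31", tz = "UTC")
)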
@@ -61,7 +59,6 @@ write.config.SIPNET <- function(defaults, trait.values, settings, run.id, inputs
     cdosetup <- paste(cdosetup, sep = "\n", paste(settings$host$cdosetup, collapse = "\n"))
   }
-
   hostteardown <- ""
   if (!is.null(settings$model$postrun)) {
     hostteardown <- paste(hostteardown, sep = "\n", paste(settings$model$postrun, collapse = "\n"))
@@ -69,6 +66,23 @@ write.config.SIPNET <- function(defaults, trait.values, settings, run.id, inputs
   if (!is.null(settings$host$postrun)) {
     hostteardown <- paste(hostteardown, sep = "\n", paste(settings$host$postrun, collapse = "\n"))
   }
+
+  # create rabbitmq specific setup.
+  cpruncmd <- cpoutcmd <- rmoutdircmd <- rmrundircmd <- ""
+  if (!is.null(settings$host$rabbitmq)) {
+    # rsync cmd from remote to local host.
+    settings$host$rabbitmq$cpfcmd <- ifelse(is.null(settings$host$rabbitmq$cpfcmd), "", settings$host$rabbitmq$cpfcmd)
+    cpruncmd <- gsub("@OUTDIR@", settings$host$rundir, settings$host$rabbitmq$cpfcmd)
+    cpruncmd <- gsub("@OUTFOLDER@", rundir, cpruncmd)
+
+    cpoutcmd <- gsub("@OUTDIR@", settings$host$outdir, settings$host$rabbitmq$cpfcmd)
+    cpoutcmd <- gsub("@OUTFOLDER@", outdir, cpoutcmd)
+
+    # delete files within rundir and outdir.
+    rmoutdircmd <- paste("rm", file.path(outdir, "*"))
+    rmrundircmd <- paste("rm", file.path(rundir, "*"))
+  }
+
   # create job.sh
   jobsh <- gsub("@HOST_SETUP@", hostsetup, jobsh)
   jobsh <- gsub("@CDO_SETUP@", cdosetup, jobsh)
@@ -87,6 +101,11 @@ write.config.SIPNET <- function(defaults, trait.values, settings, run.id, inputs
   jobsh <- gsub("@BINARY@", settings$model$binary, jobsh)
   jobsh <- gsub("@REVISION@", settings$model$revision, jobsh)
 
+  jobsh <- gsub("@CPRUNCMD@", cpruncmd, jobsh)
+  jobsh <- gsub("@CPOUTCMD@", cpoutcmd, jobsh)
+  jobsh <- gsub("@RMOUTDIRCMD@", rmoutdircmd, jobsh)
+  jobsh <- gsub("@RMRUNDIRCMD@", rmrundircmd, jobsh)
+
   if(is.null(settings$state.data.assimilation$NC.Prefix)){
     settings$state.data.assimilation$NC.Prefix <- "sipnet.out"
   }
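The @-delimited placeholders above are filled by plain gsub substitution; a toy illustration with a hypothetical cpfcmd template (the real template comes from settings$host$rabbitmq$cpfcmd and is not documented here):

cpfcmd   <- "rsync -az remote:@OUTDIR@/ @OUTFOLDER@/"   # hypothetical template
cpoutcmd <- gsub("@OUTDIR@", "/projects/pecan/out", cpfcmd)
cpoutcmd <- gsub("@OUTFOLDER@", "/tmp/out/99000000001", cpoutcmd)
cpoutcmd
# "rsync -az remote:/projects/pecan/out/ /tmp/out/99000000001/"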
@@ -417,7 +436,31 @@ write.config.SIPNET <- function(defaults, trait.values, settings, run.id, inputs
   if ("leafGrowth" %in% pft.names) {
     param[which(param[, 1] == "leafGrowth"), 2] <- pft.traits[which(pft.names == "leafGrowth")]
   }
-  } ## end loop over PFTS
+
+    # update leafOnDay and leafOffDay
+    if (!is.null(settings$run$inputs$leaf_phenology)){
+      obs_year_start <- lubridate::year(settings$run$start.date)
+      obs_year_end <- lubridate::year(settings$run$end.date)
+      if (obs_year_start != obs_year_end) {
+        PEcAn.logger::logger.info("start.date and end.date are not in the same year; currently start.date is used to select the phenological data")
+      }
+      leaf_pheno_path <- settings$run$inputs$leaf_phenology$path ## read from settings
+      if (!is.null(leaf_pheno_path)){
+        ## read data
+        leafphdata <- utils::read.csv(leaf_pheno_path)
+        leafOnDay <- leafphdata$leafonday[leafphdata$year == obs_year_start & leafphdata$site_id == settings$run$site$id]
+        leafOffDay <- leafphdata$leafoffday[leafphdata$year == obs_year_start & leafphdata$site_id == settings$run$site$id]
+        if (!is.na(leafOnDay)){
+          param[which(param[, 1] == "leafOnDay"), 2] <- leafOnDay
+        }
+        if (!is.na(leafOffDay)){
+          param[which(param[, 1] == "leafOffDay"), 2] <- leafOffDay
+        }
+      } else {
+        PEcAn.logger::logger.info("No phenology data were found. Please consider running `PEcAn.data.remote::extract_phenology_MODIS` to get the parameter file.")
+      }
+    }
+  } ## end loop over PFTS
 ####### end parameter update
 #working on reading soil file (only working for 1 soil file)
 if(length(settings$run$inputs$soilinitcond$path)==1){
@@ -441,7 +484,12 @@ write.config.SIPNET <- function(defaults, trait.values, settings, run.id, inputs
   plant_wood_vars <- c("AbvGrndWood", "abvGrndWoodFrac", "coarseRootFrac", "fineRootFrac")
   if (all(plant_wood_vars %in% ic.names)) {
     # reconstruct total wood C
-    wood_total_C <- IC$AbvGrndWood / IC$abvGrndWoodFrac
+    if(IC$abvGrndWoodFrac < 0.05){
+      wood_total_C <- IC$AbvGrndWood
+    }else{
+      wood_total_C <- IC$AbvGrndWood / IC$abvGrndWoodFrac
+    }
+
     #Sanity check
     if (is.infinite(wood_total_C) | is.nan(wood_total_C) | wood_total_C < 0) {
       wood_total_C <- 0
@@ -510,6 +558,15 @@ write.config.SIPNET <- function(defaults, trait.values, settings, run.id, inputs
     if (!is.na(lai) && is.numeric(lai)) {
       param[which(param[, 1] == "laiInit"), 2] <- lai
     }
+
+    # Initial LAI is set to 0 for deciduous forests and grasslands outside the growing season
+    if (!(lubridate::month(settings$run$start.date) %in% seq(5,9))){ # growing seasons are coarsely defined as May through September for non-conifers in the US
+      site_pft <- utils::read.csv(settings$run$inputs$pft.site$path)
+      site.pft.name <- site_pft$pft[site_pft$site == settings$run$site$id]
+      if (site.pft.name != "boreal.coniferous") { # currently only excluding boreal conifers; other evergreen PFTs could be added here later
+        param[which(param[, 1] == "laiInit"), 2] <- 0
+      }
+    }
     ## neeInit gC/m2
     nee <- try(ncdf4::ncvar_get(IC.nc,"nee"),silent = TRUE)
     if (!is.na(nee) && is.numeric(nee)) {
@@ -525,8 +582,10 @@ write.config.SIPNET <- function(defaults, trait.values, settings, run.id, inputs
     }
     ## soilWFracInit fraction
     soilWFrac <- try(ncdf4::ncvar_get(IC.nc,"SoilMoistFrac"),silent = TRUE)
-    if (!is.na(soilWFrac) && is.numeric(soilWFrac)) {
-      param[which(param[, 1] == "soilWFracInit"), 2] <- sum(soilWFrac)
+    if (!"try-error" %in% class(soilWFrac)) {
+      if (!is.na(soilWFrac) && is.numeric(soilWFrac)) {
+        param[which(param[, 1] == "soilWFracInit"), 2] <- sum(soilWFrac)/100
+      }
     }
     ## litterWFracInit fraction
     litterWFrac <- soilWFrac
@@ -605,4 +664,4 @@ remove.config.SIPNET <- function(main.outdir, settings) {
   } else {
     print("*** WARNING: Removal of files on remote host not yet implemented ***")
   }
-} # remove.config.SIPNET
\ No newline at end of file
+} # remove.config.SIPNET
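A hedged sketch of the leaf_phenology input consumed by the block above; the column names (year, site_id, leafonday, leafoffday) come from the code, the values are made up:

leafphdata <- utils::read.csv(text =
  "year,site_id,leafonday,leafoffday\n2004,772,120,290\n2005,772,118,288")
# the lookup used in write.config.SIPNET, for a 2004 run at site 772:
leafphdata$leafonday[leafphdata$year == 2004 & leafphdata$site_id == 772]
# [1] 120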
diff --git a/models/sipnet/R/write_restart.SIPNET.R b/models/sipnet/R/write_restart.SIPNET.R
index 18387a9f893..32d2736312e 100755
--- a/models/sipnet/R/write_restart.SIPNET.R
+++ b/models/sipnet/R/write_restart.SIPNET.R
@@ -1,14 +1,8 @@
-#-------------------------------------------------------------------------------
-# Copyright (c) 2012 University of Illinois, NCSA.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the
-# University of Illinois/NCSA Open Source License
-# which accompanies this distribution, and is available at
-# http://opensource.ncsa.illinois.edu/license.html
-#-------------------------------------------------------------------------------
-
-##' @title write_restart.SIPNET
-##' @name write_restart.SIPNET
+##' write_restart.SIPNET
+##'
+##' Write restart files for SIPNET.
+##' WARNING: Some variables produce illegal values < 0 and have been hardcoded to correct these values!!
+##'
 ##' @author Ann Raiho \email{araiho@@nd.edu}
 ##'
 ##' @param outdir output directory
@@ -20,14 +14,15 @@
 ##' @param RENAME flag to either rename output file or not
 ##' @param new.params list of parameters to convert between different states
 ##' @param inputs list of model inputs to use in write.configs.SIPNET
-##'
-##' @description Write restart files for SIPNET. WARNING: Some variables produce illegal values < 0 and have been hardcoded to correct these values!!
+##' @param verbose logical: print the outputs while writing?
 ##'
 ##' @return NONE
+##'
+##' @importFrom dplyr %>%
 ##' @export
 write_restart.SIPNET <- function(outdir, runid, start.time, stop.time, settings, new.state,
-                                 RENAME = TRUE, new.params = FALSE, inputs) {
-
+                                 RENAME = TRUE, new.params = FALSE, inputs, verbose = FALSE) {
+
   rundir <- settings$host$rundir
   variables <- colnames(new.state)
   # values that will be used for updating other states deterministically depending on the SDA states
@@ -64,10 +59,10 @@ write_restart.SIPNET <- function(outdir, runid, start.time, stop.time, settings,
     names(analysis.save[[length(analysis.save)]]) <- c("NEE")
   }
 
-  if ("AbvGrndWood" %in% variables) {
-    AbvGrndWood <- PEcAn.utils::ud_convert(new.state$AbvGrndWood, "Mg/ha", "g/m^2")
-    analysis.save[[length(analysis.save) + 1]] <- AbvGrndWood
-    names(analysis.save[[length(analysis.save)]]) <- c("AbvGrndWood")
+  if ("AbvGrndWood" %in% variables) {
+    AbvGrndWood <- PEcAn.utils::ud_convert(new.state$AbvGrndWood, "Mg/ha", "g/m^2")
+    analysis.save[[length(analysis.save) + 1]] <- AbvGrndWood
+    names(analysis.save[[length(analysis.save)]]) <- c("AbvGrndWood")
   }
 
   if ("LeafC" %in% variables) {
@@ -120,8 +115,10 @@ write_restart.SIPNET <- function(outdir, runid, start.time, stop.time, settings,
     analysis.save.mat <- NULL
   }
 
-  print(runid %>% as.character())
-  print(analysis.save.mat)
+  if (verbose) {
+    print(runid %>% as.character())
+    print(analysis.save.mat)
+  }
 
   do.call(write.config.SIPNET, args = list(defaults = NULL,
                                            trait.values = new.params,
                                            settings = settings,
@@ -129,4 +126,4 @@ write_restart.SIPNET <- function(outdir, runid, start.time, stop.time, settings,
                                            inputs = inputs,
                                            IC = analysis.save.mat))
   print(runid)
-} # write_restart.SIPNET
+} # write_restart.SIPNET
\ No newline at end of file
diff --git a/models/sipnet/man/met2model.SIPNET.Rd b/models/sipnet/man/met2model.SIPNET.Rd
index c6ef948abc6..5b138640ea8 100644
--- a/models/sipnet/man/met2model.SIPNET.Rd
+++ b/models/sipnet/man/met2model.SIPNET.Rd
@@ -32,6 +32,8 @@ met2model.SIPNET(
 \item{verbose}{should the function be very verbose}
 
 \item{year.fragment}{the function should ignore whether or not the data is stored as a set of complete years (such as for forecasts).}
+
+\item{...}{Additional arguments, currently ignored}
 }
 \description{
 met2model wrapper for SIPNET
diff --git a/models/sipnet/man/model2netcdf.SIPNET.Rd b/models/sipnet/man/model2netcdf.SIPNET.Rd
index 3853c307e6a..b88a96198cf 100644
--- a/models/sipnet/man/model2netcdf.SIPNET.Rd
+++ b/models/sipnet/man/model2netcdf.SIPNET.Rd
@@ -2,7 +2,7 @@
 % Please edit documentation in R/model2netcdf.SIPNET.R
 \name{model2netcdf.SIPNET}
 \alias{model2netcdf.SIPNET}
-\title{Function to convert SIPNET model output to standard netCDF format}
+\title{Convert SIPNET output to netCDF}
 \usage{
 model2netcdf.SIPNET(
   outdir,
@@ -28,7 +28,7 @@ model2netcdf.SIPNET(
 \item{end_date}{End time of the simulation}
 
-\item{delete.raw}{Flag to remove sipnet.out files, FALSE = do not remove files TRUE = remove files}
+\item{delete.raw}{logical: remove sipnet.out files
after converting?} \item{revision}{model revision} @@ -39,9 +39,6 @@ model2netcdf.SIPNET( \item{conflict}{Flag for dealing with conflicted nc files, if T we then will merge those, if F we will jump to the next.} } \description{ -Convert SIPNET output to netCDF -} -\details{ Converts all output contained in a folder to netCDF. } \author{ diff --git a/models/sipnet/man/sample.IC.SIPNET.Rd b/models/sipnet/man/sample.IC.SIPNET.Rd index 1e29bb2e456..055aa0f7508 100644 --- a/models/sipnet/man/sample.IC.SIPNET.Rd +++ b/models/sipnet/man/sample.IC.SIPNET.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/sample.IC.SIPNET.R \name{sample.IC.SIPNET} \alias{sample.IC.SIPNET} -\title{sample.IC.SIPNET} +\title{Sample initial conditions for SIPNET} \usage{ sample.IC.SIPNET(ne, state, year = 1) } @@ -10,12 +10,14 @@ sample.IC.SIPNET(ne, state, year = 1) \item{ne}{number of ensembles} \item{state}{state variables you want to pull} + +\item{year}{year to pull from} } \value{ IC matrix of initial conditions } \description{ -samples intial conditions for SIPNET +Sample initial conditions for SIPNET } \author{ Mike Dietze and Ann Raiho diff --git a/models/sipnet/man/write.config.SIPNET.Rd b/models/sipnet/man/write.config.SIPNET.Rd index 889b9237cda..32c3e334238 100644 --- a/models/sipnet/man/write.config.SIPNET.Rd +++ b/models/sipnet/man/write.config.SIPNET.Rd @@ -15,6 +15,23 @@ write.config.SIPNET( spinup = NULL ) } +\arguments{ +\item{defaults}{pft} + +\item{trait.values}{vector of samples for a given trait} + +\item{settings}{PEcAn settings object} + +\item{run.id}{run ID} + +\item{inputs}{list of model inputs} + +\item{IC}{initial condition} + +\item{restart}{In case this is a continuation of an old simulation. restart needs to be a list with name tags of runid, inputs, new.params (parameters), new.state (initial condition), ensemble.id (ensemble id), start.time and stop.time.See Details.} + +\item{spinup}{currently unused, included for compatibility with other models} +} \description{ Writes a configuration files for your model } diff --git a/models/sipnet/man/write_restart.SIPNET.Rd b/models/sipnet/man/write_restart.SIPNET.Rd index d080faabd93..cc3264ec23a 100644 --- a/models/sipnet/man/write_restart.SIPNET.Rd +++ b/models/sipnet/man/write_restart.SIPNET.Rd @@ -13,7 +13,8 @@ write_restart.SIPNET( new.state, RENAME = TRUE, new.params = FALSE, - inputs + inputs, + verbose = FALSE ) } \arguments{ @@ -34,12 +35,15 @@ write_restart.SIPNET( \item{new.params}{list of parameters to convert between different states} \item{inputs}{list of model inputs to use in write.configs.SIPNET} + +\item{verbose}{decide if we want to print the outputs.} } \value{ NONE } \description{ -Write restart files for SIPNET. WARNING: Some variables produce illegal values < 0 and have been hardcoded to correct these values!! +Write restart files for SIPNET. +WARNING: Some variables produce illegal values < 0 and have been hardcoded to correct these values!! 
} \author{ Ann Raiho \email{araiho@nd.edu} diff --git a/models/sipnet/model_info.json b/models/sipnet/model_info.json index 7c0bca184cc..f24749928ab 100644 --- a/models/sipnet/model_info.json +++ b/models/sipnet/model_info.json @@ -7,9 +7,9 @@ "creator": "Rob Kooper ", "contributors": [], "links": { - "source": "http://someurl/code", - "issues": "http://someurl/issues", - "documentation": "http://someurl/wiki" + "source": "https://github.com/PecanProject/sipnet", + "issues": "https://github.com/PecanProject/sipnet/issues", + "documentation": "https://github.com/PecanProject/sipnet/tree/master/docs" }, "inputs": {}, "bibtex": [] diff --git a/models/sipnet/tests/Rcheck_reference.log b/models/sipnet/tests/Rcheck_reference.log index 016fcbe1a07..92231b20a86 100644 --- a/models/sipnet/tests/Rcheck_reference.log +++ b/models/sipnet/tests/Rcheck_reference.log @@ -12,50 +12,8 @@ Maintainer: ‘Mike Dietze ’ New submission -License components with restrictions and base license permitting such: - BSD_3_clause + file LICENSE -File 'LICENSE': - ## This is the master copy of the PEcAn License - - University of Illinois/NCSA Open Source License - - Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - - PEcAn project - www.pecanproject.org - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal with the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. - - Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR - ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - -Unknown, possibly misspelled, fields in DESCRIPTION: - ‘Require’ - Strong dependencies not in mainstream repositories: PEcAn.data.atmosphere, PEcAn.logger, PEcAn.remote, PEcAn.utils - -The Date field is over a month old. * checking package namespace information ... OK * checking package dependencies ... OK * checking if this is a source package ... OK @@ -69,14 +27,8 @@ The Date field is over a month old. * checking installed package size ... OK * checking package directory ... OK * checking for future file timestamps ... OK -* checking DESCRIPTION meta-information ... NOTE -Author field differs from that derived from Authors@R - Author: ‘Mike Dietze’ - Authors@R: ‘Mike Dietze [aut, cre], University of Illinois, NCSA [cph]’ - -* checking top-level files ... 
NOTE -Non-standard files/directories found at top level: - ‘Dockerfile’ ‘model_info.json’ +* checking DESCRIPTION meta-information ... OK +* checking top-level files ... OK * checking for left-over files ... OK * checking index information ... OK * checking package subdirectories ... OK @@ -89,57 +41,26 @@ Non-standard files/directories found at top level: * checking whether the namespace can be unloaded cleanly ... OK * checking loading without being on the library search path ... OK * checking use of S3 registration ... OK -* checking dependencies in R code ... WARNING -'::' or ':::' imports not declared from: - ‘PEcAn.data.land’ ‘dplyr’ -Package in Depends field not imported from: ‘PEcAn.data.atmosphere’ - These packages need to be imported from (in the NAMESPACE file) - for when this namespace is loaded but not attached. +* checking dependencies in R code ... OK * checking S3 generic/method consistency ... OK * checking replacement functions ... OK * checking foreign function calls ... OK * checking R code for possible problems ... NOTE -met2model.SIPNET: no visible binding for global variable ‘i’ -met2model.SIPNET: no visible global function definition for ‘convolve’ model2netcdf.SIPNET: no visible binding for global variable ‘year’ -sample.IC.SIPNET: no visible global function definition for ‘runif’ -split_inputs.SIPNET: no visible global function definition for ‘%>%’ split_inputs.SIPNET: no visible binding for global variable ‘.’ split_inputs.SIPNET: no visible binding for global variable ‘V2’ split_inputs.SIPNET: no visible binding for global variable ‘V3’ split_inputs.SIPNET: no visible binding for global variable ‘Date’ split_inputs.SIPNET: no visible binding for global variable ‘V4’ -write_restart.SIPNET: no visible global function definition for ‘%>%’ Undefined global functions or variables: - %>% . Date V2 V3 V4 convolve i runif year -Consider adding - importFrom("stats", "convolve", "runif") -to your NAMESPACE file. + %>% . Date V2 V3 V4 year * checking Rd files ... OK * checking Rd metadata ... OK * checking Rd line widths ... OK * checking Rd cross-references ... OK * checking for missing documentation entries ... OK * checking for code/documentation mismatches ... OK -* checking Rd \usage sections ... WARNING -Undocumented arguments in documentation object 'met2model.SIPNET' - ‘...’ - -Undocumented arguments in documentation object 'model2netcdf.SIPNET' - ‘delete.raw’ - -Undocumented arguments in documentation object 'sample.IC.SIPNET' - ‘year’ - -Undocumented arguments in documentation object 'write.config.SIPNET' - ‘defaults’ ‘trait.values’ ‘settings’ ‘run.id’ ‘inputs’ ‘IC’ ‘restart’ - ‘spinup’ - -Functions with \usage entries need to have the appropriate \alias -entries, and all their arguments documented. -The \usage entries must correspond to syntactically valid R code. -See chapter ‘Writing R documentation files’ in the ‘Writing R -Extensions’ manual. +* checking Rd \usage sections ... OK * checking Rd contents ... OK * checking for unstated dependencies in examples ... OK * checking examples ... NONE @@ -149,4 +70,4 @@ Extensions’ manual. * checking for detritus in the temp directory ... 
OK * DONE -Status: 3 WARNINGs, 3 NOTEs +Status: 1 WARNING, 1 NOTE diff --git a/models/sipnet/tests/testthat.R b/models/sipnet/tests/testthat.R index 339f0c32a30..7a1fc04289f 100644 --- a/models/sipnet/tests/testthat.R +++ b/models/sipnet/tests/testthat.R @@ -1,11 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- library(testthat) library(PEcAn.utils) diff --git a/models/stics/DESCRIPTION b/models/stics/DESCRIPTION index f37764650f0..2fcce87bfa7 100644 --- a/models/stics/DESCRIPTION +++ b/models/stics/DESCRIPTION @@ -1,8 +1,7 @@ Package: PEcAn.STICS Type: Package Title: PEcAn Package for Integration of the STICS Model -Version: 1.7.2 -Date: 2021-10-04 +Version: 1.8.0.9000 Authors@R: c( person("Istem", "Fer", email = "istem.fer@fmi.fi", @@ -10,17 +9,20 @@ Authors@R: c( Description: This module provides functions to link the STICS to PEcAn. Imports: PEcAn.settings, - PEcAn.DB, PEcAn.logger, PEcAn.utils (>= 1.4.8), PEcAn.remote, + jsonlite, lubridate, ncdf4, - purrr, XML, dplyr Suggests: + SticsRFiles, testthat (>= 1.0.2) +Remotes: + github::SticsRPacks/SticsRFiles, + github::SticsRPacks/SticsOnR SystemRequirements: STICS OS_type: unix License: BSD_3_clause + file LICENSE @@ -28,4 +30,4 @@ Copyright: Authors LazyLoad: yes LazyData: FALSE Encoding: UTF-8 -RoxygenNote: 7.2.3 +RoxygenNote: 7.3.2 diff --git a/models/stics/LICENSE b/models/stics/LICENSE index 5a9e44128f1..09ef35a60b4 100644 --- a/models/stics/LICENSE +++ b/models/stics/LICENSE @@ -1,34 +1,3 @@ -## This is the master copy of the PEcAn License - -University of Illinois/NCSA Open Source License - -Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - -PEcAn project -www.pecanproject.org - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal with the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -- Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. -- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. -- Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR
-ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
-CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE.
-
+YEAR: 2024
+COPYRIGHT HOLDER: PEcAn Project
+ORGANIZATION: PEcAn Project, authors affiliations
diff --git a/models/stics/R/met2model.STICS.R b/models/stics/R/met2model.STICS.R
index ed7e9e1feb6..1d53bdd13ea 100644
--- a/models/stics/R/met2model.STICS.R
+++ b/models/stics/R/met2model.STICS.R
@@ -1,13 +1,3 @@
-#-------------------------------------------------------------------------------
-# Copyright (c) 2012 University of Illinois, NCSA.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the
-# University of Illinois/NCSA Open Source License
-# which accompanies this distribution, and is available at
-# http://opensource.ncsa.illinois.edu/license.html
-#-------------------------------------------------------------------------------
-
-##-------------------------------------------------------------------------------------------------#
 ##' Converts a met CF file to a model specific met file. The input
 ##' files are called /.YYYY.cf
 ##'
@@ -43,8 +33,8 @@ met2model.STICS <- function(in.path, in.prefix, outfolder, start_date, end_date,
                              host = PEcAn.remote::fqdn(),
                              mimetype = "text/plain",
                              formatname = "climate",
-                             startdate = start_date,
-                             enddate = end_date,
+                             startdate = start_date, # these need fixing, not the same for all climate files
+                             enddate = end_date,     # these need fixing
                              dbfile.name = out.files,
                              stringsAsFactors = FALSE)
   PEcAn.logger::logger.info("internal results")
@@ -63,6 +53,7 @@ met2model.STICS <- function(in.path, in.prefix, outfolder, start_date, end_date,
 
     if (file.exists(out.files.full[ctr]) && !overwrite) {
       PEcAn.logger::logger.debug("File '", out.files.full[ctr], "' already exists, skipping to next file.")
+      ctr <- ctr + 1
       next
     }
 
@@ -112,14 +103,24 @@ met2model.STICS <- function(in.path, in.prefix, outfolder, start_date, end_date,
 
     sec <- nc$dim$time$vals
     sec <- PEcAn.utils::ud_convert(sec, unlist(strsplit(nc$dim$time$units, " "))[1], "seconds")
-    dt <- PEcAn.utils::seconds_in_year(year) / length(sec)
+    dt <- diff(sec)[1]
     tstep <- round(86400 / dt)
     dt <- 86400 / tstep
 
     ind <- rep(simdays, each = tstep)
 
+    if(unlist(strsplit(nc$dim$time$units, " "))[1] %in% c("days", "day")){
+      # this should always be the case, but just in case
+      origin_dt <- as.POSIXct(unlist(strsplit(nc$dim$time$units, " "))[3], "%Y-%m-%d", tz="UTC")
+      ydays <- lubridate::yday(origin_dt + sec)
+
+    }else{
+      PEcAn.logger::logger.error("Check units of time in the weather data.")
+    }
+
     # column 6: minimum temperature (°C)
     Tair <- ncdf4::ncvar_get(nc, "air_temperature") ## in Kelvin
+    Tair <- Tair[ydays %in% simdays]
     Tair_C <- PEcAn.utils::ud_convert(Tair, "K", "degC")
     t_dmin <- round(tapply(Tair_C, ind, min, na.rm = TRUE), digits = 2) # maybe round these numbers
     weather_df[ ,6] <- t_dmin
@@ -131,12 +132,14 @@ met2model.STICS <- function(in.path, in.prefix, outfolder, start_date, end_date,
 
     # column 8: global radiation (MJ m-2.
j-1) rad <- ncdf4::ncvar_get(nc, "surface_downwelling_shortwave_flux_in_air") gr <- rad * 0.0864 # W m-2 to MJ m-2 d-1 + gr <- gr[ydays %in% simdays] weather_df[ ,8] <- round(tapply(gr, ind, mean, na.rm = TRUE), digits = 2) # irradiation (MJ m-2 d-1) # column 9: Penman PET (mm.j-1) OPTIONAL, leave it as -999.9 for now # column 10: rainfall (mm.j-1) Rain <- ncdf4::ncvar_get(nc, "precipitation_flux") # kg m-2 s-1 + Rain <- Rain[ydays %in% simdays] raini <- tapply(Rain * 86400, ind, mean, na.rm = TRUE) weather_df[ ,10] <- round(raini, digits = 2) # precipitation (mm d-1) @@ -144,10 +147,13 @@ met2model.STICS <- function(in.path, in.prefix, outfolder, start_date, end_date, # OPTIONAL if you're not using the “Shuttleworth and Wallace” method or the “Penman calculate” method to calculate PET in the station file U <- try(ncdf4::ncvar_get(nc, "eastward_wind")) V <- try(ncdf4::ncvar_get(nc, "northward_wind")) - if(is.numeric(U) & is.numeric(V)){ + if(is.numeric(U) & is.numeric(V) & !all(is.nan(U)) & !all(is.nan(V))){ + U <- U[ydays %in% simdays] + V <- V[ydays %in% simdays] ws <- sqrt(U ^ 2 + V ^ 2) }else{ ws <- try(ncdf4::ncvar_get(nc, "wind_speed")) + ws <- ws[ydays %in% simdays] if (is.numeric(ws)) { PEcAn.logger::logger.info("eastward_wind and northward_wind absent; using wind_speed") }else{ @@ -161,6 +167,7 @@ met2model.STICS <- function(in.path, in.prefix, outfolder, start_date, end_date, # column 13: CO2 content(ppm). co2 <- try(ncdf4::ncvar_get(nc, "mole_fraction_of_carbon_dioxide_in_air")) + co2 <- co2[ydays %in% simdays] if(is.numeric(co2)){ weather_df[ ,13] <- round(tapply(co2 * 1e6, ind, mean, na.rm = TRUE), digits = 1) }else{ diff --git a/models/stics/R/model2netcdf.STICS.R b/models/stics/R/model2netcdf.STICS.R index 012db5e80bf..924c67b3637 100644 --- a/models/stics/R/model2netcdf.STICS.R +++ b/models/stics/R/model2netcdf.STICS.R @@ -1,13 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. 
This program and the accompanying materials
-# are made available under the terms of the
-# University of Illinois/NCSA Open Source License
-# which accompanies this distribution, and is available at
-# http://opensource.ncsa.illinois.edu/license.html
-#-------------------------------------------------------------------------------
-
-##-------------------------------------------------------------------------------------------------#
 ##' Convert STICS output into the NACP Intercomparison format (ALMA using netCDF)
 ##'
 ##' @name model2netcdf.STICS
@@ -22,14 +12,18 @@
 ##' @export
 ##'
 ##' @author Istem Fer
+##'
 model2netcdf.STICS <- function(outdir, sitelat, sitelon, start_date, end_date, overwrite = FALSE) {
-
-
+
   ### Read in model output in STICS format
   out_files <- list.files(outdir)
 
   stics_out_file <- file.path(outdir, out_files[grepl("mod_s.*", out_files)])
-  stics_output <- utils::read.table(stics_out_file, header = T, sep = ";")
+  stics_output <- lapply(stics_out_file, utils::read.table, header = TRUE, sep = ";")
+  stics_output <- do.call("rbind", stics_output)
+  # probably already ordered, but order by year and DoY anyway
+  stics_output <- stics_output[order(stics_output[,1], stics_output[,4]), ]
 
   simulation_years <- unique(stics_output$ian)
 
@@ -38,7 +32,8 @@ model2netcdf.STICS <- function(outdir, sitelat, sitelon, start_date, end_date, o
   # check that specified years and output years match
   if (!all(year_seq %in% simulation_years)) {
-    PEcAn.logger::logger.severe("Years selected for model run and STICS output years do not match ")
+    # if they do not, log an error instead of failing altogether, so that it won't break ensemble analysis
+    PEcAn.logger::logger.error("Years selected for model run and STICS output years do not match.")
   }
 
   # determine time step?
@@ -52,7 +47,38 @@ model2netcdf.STICS <- function(outdir, sitelat, sitelon, start_date, end_date, o
     thisyear <- stics_output[ , "ian"] == y
 
     outlist <- list()
-    outlist[[1]] <- stics_output[thisyear, "lai.n."] # LAI in (m2 m-2)
+    outlist[[length(outlist)+1]] <- stics_output[thisyear, "lai.n."] # LAI in (m2 m-2)
+
+    # daily amount of CO2-C emitted due to soil mineralisation (humus and organic residues) (kg ha-1 d-1)
+    HeteroResp <- PEcAn.utils::ud_convert(stics_output[thisyear, "CO2sol"], "ha-1 day-1", "m-2 s-1")
+
+    outlist[[length(outlist)+1]] <- HeteroResp
+
+    # dltams(n): daily growth rate of the plant (t.ha-1.d-1)
+    dltams <- PEcAn.utils::ud_convert(stics_output[thisyear, "dltams.n."], "ton", "kg") * 0.48 # ton to kgC
+    # dltaremobil: daily amount of perennial reserves remobilised (t.ha-1.d-1)
+    dltaremobil <- PEcAn.utils::ud_convert(stics_output[thisyear, "dltaremobil"], "ton", "kg") * 0.48 # ton to kgC
+
+    NPP <- dltams - dltaremobil # kgC ha-1 d-1
+    NPP[NPP < 0] <- 0
+
+    # double checking that this is all NPP (above and below)
+    ## this:
+    #stics_output[thisyear, "dltams.n."] # t.ha-1.d-1
+    ## should be roughly equal to this:
+    #diff(stics_output[thisyear, "masec.n."]) + diff(stics_output[thisyear, "msrac.n."]) # t.ha-1
+
+    NPP <- PEcAn.utils::ud_convert(NPP, "ha-1 day-1", "m-2 s-1") # kg C m-2 s-1
+    outlist[[length(outlist)+1]] <- NPP
+
+    NEE <- -1 * (NPP - HeteroResp)
+    outlist[[length(outlist)+1]] <- NEE
+
+    # other vars
+    # Cr: amount of C in organic residues mixed with soil (kg.ha-1)
+    # Crac: amount of C in roots at harvest (kg.ha-1)
+    # Chumt: amount of C in humified organic matter (active + inert fractions) (kg.ha-1)
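The NEE line above follows the atmospheric sign convention (respiration minus uptake); a quick numerical check with made-up fluxes:

NPP        <- 5e-8   # kg C m-2 s-1, hypothetical daily mean
HeteroResp <- 2e-8   # kg C m-2 s-1
NEE <- -1 * (NPP - HeteroResp)
NEE  # -3e-08: negative NEE, i.e. the crop is a net carbon sink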
 
     # ******************** Declare netCDF dimensions and variables ********************#
     t <- ncdf4::ncdim_def(name = "time",
@@ -68,7 +94,10 @@ model2netcdf.STICS <- function(outdir, sitelat, sitelon, start_date, end_date, o
     dims <- list(lon = lon, lat = lat, time = t)
 
     nc_var <- list()
-    nc_var[[1]] <- PEcAn.utils::to_ncvar("LAI", dims)
+    nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("LAI", dims)
+    nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("HeteroResp", dims)
+    nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("NPP", dims)
+    nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("NEE", dims)
 
     # ******************** Declare netCDF variables ********************#
diff --git a/models/stics/R/version.R b/models/stics/R/version.R
new file mode 100644
index 00000000000..0e58d885272
--- /dev/null
+++ b/models/stics/R/version.R
@@ -0,0 +1,3 @@
+# Set at package install time, used by pecan.all::pecan_version()
+# to identify development versions of packages
+.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown")
diff --git a/models/stics/R/write.config.STICS.R b/models/stics/R/write.config.STICS.R
index 3b190b31b3f..39802f5520a 100644
--- a/models/stics/R/write.config.STICS.R
+++ b/models/stics/R/write.config.STICS.R
@@ -1,13 +1,3 @@
-#-------------------------------------------------------------------------------
-# Copyright (c) 2012 University of Illinois, NCSA.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the
-# University of Illinois/NCSA Open Source License
-# which accompanies this distribution, and is available at
-# http://opensource.ncsa.illinois.edu/license.html
-#-------------------------------------------------------------------------------
-
-##-------------------------------------------------------------------------------------------------#
 ##' Writes STICS configurations.
 ##'
 ##' Requires a pft xml object, a list of trait values for a single model run,
@@ -25,25 +15,130 @@
 ##' @author Istem Fer
 ##-------------------------------------------------------------------------------------------------#
 write.config.STICS <- function(defaults, trait.values, settings, run.id) {
+
+  ## the rest of the code assumes only plant PFTs;
+  ## a small modification here so as not to need a bigger rewrite for now
+  if(any(grepl("soil", names(trait.values)))){
+    soil_params <- trait.values[[grep("soil", names(trait.values))]]
+    settings$pfts[[grep("soil", names(trait.values))]] <- NULL
+    trait.values[[grep("soil", names(trait.values))]] <- NULL
+  }else{
+    soil_params <- NULL
+  }
 
   ## simulation days, used later
   dseq <- seq(lubridate::as_date(settings$run$start.date), lubridate::as_date(settings$run$end.date), by = "day")
 
   # find out where to write run/ouput
   rundir <- file.path(settings$host$rundir, run.id)
-  pltdir <- file.path(settings$host$rundir, run.id, "plant")
   cfgdir <- file.path(settings$host$rundir, run.id, "config")
   bindir <- file.path(settings$host$rundir, run.id, "bin")
   outdir <- file.path(settings$host$outdir, run.id)
+
+  ########## Determining number of USMs (could be made its own function)
+
+  # In STICS, it is 1 USM per crop cycle, where each cycle can be 2 years max.
+  # If we have a consecutive monoculture for > 2 years, we still need to divide it into 2-year USMs.
+  # If there are multiple pfts, this is a strong clue that there are multiple crop cycles,
+  # but it can also be the case that there is one cycle with intercropping.
+
+  years_requested <- unique(lubridate::year(dseq))
+  # we always pass two climate files to STICS, so repeat the same year twice if the last crop cycle has 1 year only
+  if(length(years_requested) %% 2 == 1) years_requested <- c(years_requested, years_requested[length(years_requested)])
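To make the two-year USM bookkeeping above concrete, a small worked example (years hypothetical):

years_requested <- 2019:2021          # three simulation years
length(years_requested) %% 2 == 1     # TRUE, so the last year is repeated
years_requested <- c(years_requested, years_requested[length(years_requested)])
years_requested                       # 2019 2020 2021 2021
# paired off into 2-year cycles this yields two USMs: 2019-2020 and 2021-2021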
+
+  # Could the events file hierarchy be organized by crop cycle? Need to check how ACE-json does it
+  if(!is.null(settings$run$inputs$fielddata)){
+    events_file <- jsonlite::read_json(settings$run$inputs$fielddata$path, simplifyVector = TRUE)[[1]]
+
+    # testing new approach
+    if(!is.null(events_file$rotation)){
+      usmdirs <- rep(NA, nrow(events_file$rotation))
+      for(uic in seq_along(usmdirs)){
+        p1 <- tolower(events_file$rotation$planted_crop1[uic])
+        p2 <- ifelse(events_file$rotation$planted_crop2[uic] != "-99.0", tolower(events_file$rotation$planted_crop2[uic]), "")
+        uname <- paste0(p1, p2)
+        usmdirs[uic] <- paste0(file.path(settings$host$rundir, run.id, uname), "_",
+                               lubridate::year(events_file$rotation$rotation_begin[uic]), "-",
+                               lubridate::year(events_file$rotation$rotation_end[uic]))
+      }
+    }else{
+
+      # events file can have info from other years, subset
+      sub_events <- events_file$events[(lubridate::year(events_file$events$date) %in% years_requested),]
+
+      crops <- c(sub_events$planted_crop, sub_events$harvest_crop)
+      if(!is.null(crops)){
+        crops <- crops[!is.na(crops)] # filter NAs caused by flattening the json
+        # for now taking the simplistic assumption that if there is more than 1 harvested + planted crop, there are multiple crop cycles
+        if(length(unique(crops)) > 1){
+          # we probably have multiple pfts passed via settings, usmdir_root will be an array
+          usmdir_root <- paste0(file.path(settings$host$rundir, run.id, sapply(settings$pfts, `[[`, "name")), "_")
+          # !!! IMPORTANT: document also elsewhere
+          # I'm making STICS PFT names match fieldactivity names, or more broadly whatever is in the events json file!!!
+          # e.g. barley is not barley but bar
+          # alternatively I can start a LUT to match bety-pft names to the events species codes
+          # we need to pass the right parameters under the right USM!
+
+          if(length(years_requested) <= 2){
+            # multiple usms due to crop rotation only
+            # associate spp and year
+            usmdirs <- sapply(crops, function(x){
+              crop_yr <- lubridate::year(sub_events$date[(sub_events$planted_crop %in% x) | (sub_events$harvest_crop %in% x)])
+              crop_usm <- paste0(usmdir_root[grep(tolower(x), usmdir_root)], crop_yr)
+              return(crop_usm)
+            })
+
+            # make sure the usmdir order is the same as the rotation order
+            # this may need to get more sophisticated in the future,
+            # but keeping the usmdirs in chronological order will come in handy in the rest of this function
+            usmdirs <- usmdirs[order(sapply(strsplit(sub(".*_", "", basename(usmdirs)), "-"), function(x) min(as.numeric(x))))]
+
+          }else{
+            # multiple usms due to crop rotation and multiple cropping seasons per rotation
+            # not implemented yet
+            PEcAn.logger::logger.severe("write.config.STICS is under development for this case.")
+          }
+
+        }else{
+          # single crop, single usmdir_root
+          usmdir_root <- paste0(file.path(settings$host$rundir, run.id, settings$pfts$pft$name), "_")
+          if(length(years_requested) > 2){
+            # multiple usms because more than 2 years of simulation
+            years_indices <- rep(seq(1, length(years_requested), by=2), each=2)
+            usmdirs <- tapply(years_requested, years_indices, function(x) paste0(usmdir_root, paste(x, collapse = '-')))
+          }else{
+            # single usm because less than 2 years of simulation
+            usmdirs <- paste0(usmdir_root, paste(years_requested, collapse = '-'))
+          }
+        }
+
+      }else{
+        # somehow events have no crop identifiers, e.g.
only fertilization and tilling events are passed + # most likely a partial year & crop cycle + usmdir_root <- paste0(file.path(settings$host$rundir, run.id, settings$pfts$pft$name), "_") + # single usm + usmdirs <- paste0(usmdir_root, paste(years_requested, collapse = '-')) + } + + } + + } + + # TODO: have a better way to determine USMs + + ########################## finish usmdirs + + ## make sure rundir and outdir exist dir.create(rundir, showWarnings = FALSE, recursive = TRUE) dir.create(outdir, showWarnings = FALSE, recursive = TRUE) - ## create plant, config and bin dirs - dir.create(pltdir, showWarnings = FALSE, recursive = TRUE) - dir.create(cfgdir, showWarnings = FALSE, recursive = TRUE) - dir.create(bindir, showWarnings = FALSE, recursive = TRUE) + ## create usm, config and bin dirs + dir.create(cfgdir, showWarnings = FALSE, recursive = TRUE) + dir.create(bindir, showWarnings = FALSE, recursive = TRUE) + sapply(usmdirs, dir.create, showWarnings = FALSE, recursive = TRUE) # write preferences prf_xml <- XML::xmlParse(system.file("preferences.xml", package = "PEcAn.STICS")) @@ -54,11 +149,16 @@ write.config.STICS <- function(defaults, trait.values, settings, run.id) { file = file.path(cfgdir, "preferences.xml"), prefix = '\n\n') - # read in template USM (Unit of SiMulation) file, has the master settings, file names etc. - # TODO: more than one usm - usm_xml <- XML::xmlParse(system.file("usms.xml", package = "PEcAn.STICS")) - usm_list <- XML::xmlToList(usm_xml) + # stics and javastics path + stics_path <- settings$model$binary + + + # Per STICS development team, there are two types of STICS inputs + # Global input: _plt.xml, param_gen.xml, param_newform.xml + # Local input: _ini.xml (initialization), sols.xml (soils), _tec.xml (crop management), (climate files) _sta.xml, *.year + + # NOTE: however, it's the text files, not the xml files that are read by the STICS executable. ################################# Prepare Plant File ####################################### @@ -66,53 +166,999 @@ write.config.STICS <- function(defaults, trait.values, settings, run.id) { # read in template plt file, has all the formalisms plt_xml <- XML::xmlParse(system.file("crop_plt.xml", package = "PEcAn.STICS")) - plt_list <- XML::xmlToList(plt_xml) - + #plt_list <- XML::xmlToList(plt_xml) + plt_files <- list() for (pft in seq_along(trait.values)) { pft.traits <- unlist(trait.values[[pft]]) pft.names <- names(pft.traits) + plant_file <- file.path(rundir, paste0(names(trait.values)[pft], "_plt.xml")) + + + if(names(trait.values)[pft] != "env"){ + # save the template, will be overwritten below + XML::saveXML(plt_xml, file = plant_file) + }else{ + next + } + + plt_files[[pft]] <- plant_file + + # to learn the parameters in a plant file + # SticsRFiles::get_param_info(file_path = plant_file) + # go over each formalism and replace params following the order in crop_plt - # for now I vary only one parameter under roots. + # TODO: vary more params # plant name and group # effect of atmospheric CO2 concentration + # phasic development + # to see parameters per formalism + # values = SticsRFiles::get_param_xml(plant_file, select = "formalisme", select_value = "phasic development") + # unlist(values) + + # name code of plant in 3 letters + # a handful of plants have to have specific codes, e.g. forages need to be 'fou' and vine needs to be 'vig' + # but others can be anything? 
if not, either consider a LUT or passing via settings + if(names(trait.values)[pft] %in% c("frg", "wcl", "alf")){ + codeplante <- 'fou' + codeperenne <- 2 + }else{ + codeplante <- base::substr(names(trait.values)[pft],1,3) + codeperenne <- 1 + } + codebfroid <- 2 # vernalization requirement, hardcoding for now, 2==yes + SticsRFiles::set_param_xml(plant_file, "codeplante", codeplante, overwrite = TRUE) + SticsRFiles::set_param_xml(plant_file, "codeperenne", codeperenne, overwrite = TRUE) + SticsRFiles::set_param_xml(plant_file, "codebfroid", codebfroid, overwrite = TRUE) + + # minimum temperature below which development stops (degree C) + if ("tdmin" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "tdmin", pft.traits[which(pft.names == "tdmin")], overwrite = TRUE) + } + + # maximum temperature above which development stops (degree C) + if ("tdmax" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "tdmax", pft.traits[which(pft.names == "tdmax")], overwrite = TRUE) + } + + # basal photoperiod + if ("phobase" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "phobase", pft.traits[which(pft.names == "phobase")], overwrite = TRUE) + } + + # saturating photoperiod + if ("phosat" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "phosat", pft.traits[which(pft.names == "phosat")], overwrite = TRUE) + } + + + # maximum phasic delay allowed due to stresses + if ("phasic_delay_max" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "stressdev", pft.traits[which(pft.names == "phasic_delay_max")], overwrite = TRUE) + } + + # minimum number of vernalising days (d) [0,7] + if ("vernalization_days_min" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "jvcmini", round(pft.traits[which(pft.names == "vernalization_days_min")]), overwrite = TRUE) + } + + # day of initiation of vernalisation in perennial crops (julian d) [1,731] + # this only takes effect for perennial crops + if ("vernalization_init" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "julvernal", round(pft.traits[which(pft.names == "vernalization_init")]), overwrite = TRUE) + } + + # optimal temperature for vernalisation (degreeC) + if ("vernalization_TOpt" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "tfroid", pft.traits[which(pft.names == "vernalization_TOpt")], overwrite = TRUE) + } + + # semi thermal amplitude for vernalising effect (degreeC) + if ("vernalization_TAmp" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "ampfroid", pft.traits[which(pft.names == "vernalization_TAmp")], overwrite = TRUE) + } + + if ("coeflevamf" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "coeflevamf", pft.traits[which(pft.names == "coeflevamf")], overwrite = TRUE) + } + + if ("coefamflax" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "coefamflax", pft.traits[which(pft.names == "coefamflax")], overwrite = TRUE) + } + + if ("coeflaxsen" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "coeflaxsen", pft.traits[which(pft.names == "coeflaxsen")], overwrite = TRUE) + } + + if ("coefsenlan" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "coefsenlan", pft.traits[which(pft.names == "coefsenlan")], overwrite = TRUE) + } + + if ("coeflevdrp" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "coeflevdrp", pft.traits[which(pft.names == "coeflevdrp")], overwrite = TRUE) + } + + if ("coefdrpmat" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "coefdrpmat", pft.traits[which(pft.names == "coefdrpmat")], overwrite = TRUE) 
+ } + + if ("coefflodrp" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "coefflodrp", pft.traits[which(pft.names == "coefflodrp")], overwrite = TRUE) + } + + # emergence and starting + # values = SticsRFiles::get_param_xml(plant_file, select = "formalisme", select_value = "emergence and starting") + # unlist(values) + + # minimum temperature below which emergence is stopped (degreeC) + if ("emergence_Tmin" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "tgmin", pft.traits[which(pft.names == "emergence_Tmin")], overwrite = TRUE) + } + + # nbfeuilplant, leaf number per plant when planting, default 0, skipping for now + + + # this is a switch, for now hardcoding to have delay at the beginning of the crop (1) + # if starting the simulation from a later stage (e.g. lev) this has no effect + # codegermin, option of simulation of a germination phase or a delay at the beginning of the crop (1) or direct starting (2) + SticsRFiles::set_param_xml(plant_file, "codegermin", 1, overwrite = TRUE) + + # cumulative thermal time allowing germination (degree-d) + if ("cum_thermal_germin" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "stpltger", pft.traits[which(pft.names == "cum_thermal_germin")], overwrite = TRUE) + } + + # skipping the other parameters related to this switch, they don't seem influential, at least on NPP and LAI + # potgermi: soil water potential under which seed imbibition is impeded + # nbjgerlim: maximum number of days after grain imbibition allowing full germination + # propjgermin: minimal proportion of the duration nbjgerlim when the temperature is higher than the temperature threshold Tdmax + + + # parameter of the curve of coleoptile elongation + if ("belong" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "belong", pft.traits[which(pft.names == "belong")], overwrite = TRUE) + } + + # parameter of the plantlet elongation curve + if ("celong" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "celong", pft.traits[which(pft.names == "celong")], overwrite = TRUE) + } + + # maximum elongation of the coleoptile in darkness condition + if ("coleoptile_elong_dark_max" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "elmax", pft.traits[which(pft.names == "coleoptile_elong_dark_max")], overwrite = TRUE) + } + + # number of days after germination after which plant emergence is reduced + if ("days_reduced_emergence_postgerm" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "nlevlim1", round(pft.traits[which(pft.names == "days2reduced_emergence_postgerm")]), overwrite = TRUE) + } + + # number of days after germination after which plant emergence is impossible + if ("days2stopped_emergence_postgerm" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "nlevlim2", round(pft.traits[which(pft.names == "days2stopped_emergence_postgerm")]), overwrite = TRUE) + } + + # plant vigor index allowing to emerge through a soil crust, vigueurbat == 1 inactivates some soil crust related parameters, skipping for now + + # there are also "planting" related parameters + # leaves + # values = SticsRFiles::get_param_xml(plant_file, select = "formalisme", select_value = "leaves") + # unlist(values) + + + # phyllotherme, thermal duration between the apparition of two successive leaves on the main stem (degree day) + # assuming this is the same as phyllochron + if ("phyllochron" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "phyllotherme", pft.traits[which(pft.names == "phyllochron")], overwrite = TRUE) + } + + # minimal 
density above which interplant competition starts (m-2) + if ("dens_comp" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "bdens", pft.traits[which(pft.names == "dens_comp")], overwrite = TRUE) + } + + # LAI above which competition between plants starts (m2 m-2) + if ("lai_comp" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "laicomp", pft.traits[which(pft.names == "lai_comp")], overwrite = TRUE) + } + + # basal height of crop (m) + if ("height" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "hautbase", pft.traits[which(pft.names == "height")], overwrite = TRUE) + } + + # maximum height of crop + if ("HTMAX" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "hautmax", pft.traits[which(pft.names == "HTMAX")], overwrite = TRUE) + } + + # minimum temperature at which growth ceases + if ("tcmin_growth" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "tcmin", pft.traits[which(pft.names == "tcmin_growth")], overwrite = TRUE) + } + + # maximum temperature at which growth ceases + if ("tcmax_growth" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "tcmax", pft.traits[which(pft.names == "tcmax_growth")], overwrite = TRUE) + } + + # temperature beyond which foliar growth stops + if ("tcmax_foliar_growth" %in% pft.names) { + # tcxstop must be > tdmax, priors should be set that way, and we can let the simulation fail afterwards, but putting a warning here + tdmax <- SticsRFiles::get_param_xml(plant_file, param="tdmax", select = "formalisme", select_value = "phasic development")[[1]][[1]] + tcxstop <- pft.traits[which(pft.names == "tcmax_foliar_growth")] + if(tcxstop < tdmax){ + PEcAn.logger::logger.warn("tcmax_foliar_growth value (", tcxstop, ") should be greater than tdmax (", tdmax, ").") + } + SticsRFiles::set_param_xml(plant_file, "tcxstop", tcxstop, overwrite = TRUE) + + } + + # ulai at the inflexion point of the function DELTAI=f(ULAI) + if ("vlaimax" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "vlaimax", pft.traits[which(pft.names == "vlaimax")], overwrite = TRUE) + } + + # parameter of the logistic curve of LAI growth + if ("pentlaimax" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "pentlaimax", pft.traits[which(pft.names == "pentlaimax")], overwrite = TRUE) + } + + # ulai from which the rate of leaf growth decreases + if ("udlaimax" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "udlaimax", pft.traits[which(pft.names == "udlaimax")], overwrite = TRUE) + } + + # life span of early leaves expressed as a fraction of the life span of the last leaves emitted DURVIEF + if ("early2last_leaflife" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "ratiodurvieI", pft.traits[which(pft.names == "early2last_leaflife")], overwrite = TRUE) + } + + # fraction of senescent biomass (relative to total biomass) + if ("senes2total_biomass" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "ratiosen", pft.traits[which(pft.names == "senes2total_biomass")], overwrite = TRUE) + } + + # fraction of senescent leaves falling to the soil + # not sure if this is supposed to be a fraction or a percentage in STICS, values look like a fraction but min-max is given as 0-100 + # treating it like a fraction for now + if ("fracLeafFall" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "abscission", pft.traits[which(pft.names == "fracLeafFall")], overwrite = TRUE) + } + + # parameter relating the C/N of dead leaves and the INN + if ("parazofmorte" %in% pft.names) { + 
SticsRFiles::set_param_xml(plant_file, "parazofmorte", pft.traits[which(pft.names == "parazofmorte")], overwrite = TRUE) + } + + # parameter of the N stress function active on leaf expansion (INNLAI), bilinear function vs INN passing through the point (INNmin, INNturgmin) + if ("innturgmin" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "innturgmin", pft.traits[which(pft.names == "innturgmin")], overwrite = TRUE) + } + + # accelerating parameter for the lai growth rate + if ("lai_growth_rate_accelerating" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "dlaimin", pft.traits[which(pft.names == "lai_growth_rate_accelerating")], overwrite = TRUE) + } + + # maximum rate of the setting up of LAI + if ("lai_max_rate" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "dlaimaxbrut", pft.traits[which(pft.names == "lai_max_rate")], overwrite = TRUE) + } + + # relative additional lifespan due to N excess in plant (INN > 1) + if ("relative_addlifespan_DT_excessN" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "durviesupmax", pft.traits[which(pft.names == "relative_addlifespan_DT_excessN")], overwrite = TRUE) + } + + # parameter of the N stress function active on senescence (INNsenes), bilinear function vs INN passing through the point (INNmin, INNsen) + if ("innsen" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "innsen", pft.traits[which(pft.names == "innsen")], overwrite = TRUE) + } + + # threshold soil water content active to simulate water senescence stress as a proportion of the turgor stress + if ("rapsenturg" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "rapsenturg", pft.traits[which(pft.names == "rapsenturg")], overwrite = TRUE) + } + + # radiation interception + # values = SticsRFiles::get_param_xml(plant_file, select = "formalisme", select_value = "radiation interception") + + # extinction coefficient of photosynthetic active radiation in the canopy + if ("extinction_coefficient_diffuse" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "extin", pft.traits[which(pft.names == "extinction_coefficient_diffuse")], overwrite = TRUE) + } + # shoot biomass growth + # values = SticsRFiles::get_param_xml(plant_file, select = "formalisme", select_value = "shoot biomass growth") + + # minimum temperature for development + if ("temin" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "temin", pft.traits[which(pft.names == "temin")], overwrite = TRUE) + } + + # maximal temperature above which plant growth stops + if ("temax" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "temax", pft.traits[which(pft.names == "temax")], overwrite = TRUE) + } + + # optimal temperature (1/2) for plant growth + if ("teopt" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "teopt", pft.traits[which(pft.names == "teopt")], overwrite = TRUE) + } + + # optimal temperature (2/2) for plant growth + if ("teoptbis" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "teoptbis", pft.traits[which(pft.names == "teoptbis")], overwrite = TRUE) + } + + # maximum radiation use efficiency during the juvenile phase + if ("RUE_juv" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "efcroijuv", pft.traits[which(pft.names == "RUE_juv")], overwrite = TRUE) + } + + # maximum radiation use efficiency during the vegetative stage + if ("RUE_veg" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "efcroiveg", pft.traits[which(pft.names == "RUE_veg")], overwrite = TRUE) + } + + # maximum radiation use efficiency during 
the grain filling phase + if ("RUE_rep" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "efcroirepro", pft.traits[which(pft.names == "RUE_rep")], overwrite = TRUE) + } + + # fraction of daily remobilisable C reserves + if ("remobres" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "remobres", pft.traits[which(pft.names == "remobres")], overwrite = TRUE) + } + + # ratio biomass / useful height cut of crops (t.ha-1.m-1) + if ("biomass2usefulheight" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "coefmshaut", pft.traits[which(pft.names == "biomass2usefulheight")], overwrite = TRUE) + } + + # partitioning of biomass in organs - # yield formation + # values = SticsRFiles::get_param_xml(plant_file, select = "formalisme", select_value = "partitioning of biomass in organs") + + # maximum SLA (specific leaf area) of green leaves (cm2 g-1) + if ("SLAMAX" %in% pft.names) { + slamax <- pft.traits[which(pft.names == "SLAMAX")] + slamax <- PEcAn.utils::ud_convert(PEcAn.utils::ud_convert(slamax, "m2", "cm2"), "kg-1", "g-1") # m2 kg-1 to cm2 g-1 + SticsRFiles::set_param_xml(plant_file, "slamax", slamax, overwrite = TRUE) + } + + # minimum SLA (specific leaf area) of green leaves (cm2 g-1) + if ("SLAMIN" %in% pft.names) { + slamin <- pft.traits[which(pft.names == "SLAMIN")] + slamin <- PEcAn.utils::ud_convert(PEcAn.utils::ud_convert(slamin, "m2", "cm2"), "kg-1", "g-1") # m2 kg-1 to cm2 g-1 + SticsRFiles::set_param_xml(plant_file, "slamin", slamin, overwrite = TRUE) + } + + + # ratio stem (structural part)/leaf + if ("stem2leaf" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "tigefeuil", pft.traits[which(pft.names == "stem2leaf")], overwrite = TRUE) + } + + # skipping: envfruit, fraction of envelope in grainmaxi (w:w) + # skipping: sea, specific area of fruit envelopes + + # yield formation, will get back # roots + # values = SticsRFiles::get_param_xml(plant_file, select = "formalisme", select_value = "roots") + - # specific root length (cm g-1) - # plt_list[[10]][[6]][[2]][[4]] position + # sensanox, index of anoxia sensitivity (0 = insensitive), 0 for now + # stoprac, stage when root growth stops (LAX = maximum leaf area index, end of leaf growth; SEN = beginning of leaf senescence) + + # sensrsec, index of root sensitivity to drought (1=insensitive) + if ("rootsens2drought" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "sensrsec", pft.traits[which(pft.names == "rootsens2drought")], overwrite = TRUE) + } + + # contrdamax, maximal reduction in root growth rate due to soil strength (high bulk density) + if ("db_reduc_rgr_max" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "contrdamax", pft.traits[which(pft.names == "db_reduc_rgr_max")], overwrite = TRUE) + } + + # draclong, maximum rate of root length production per plant (cm plant-1 degreeD-1) + if ("rootlength_prod_max" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "draclong", pft.traits[which(pft.names == "rootlength_prod_max")], overwrite = TRUE) + } + + # debsenrac, sum of degree-days defining the beginning of root senescence (root life time) (degreeD) + if ("root_sen_dday" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "debsenrac", round(pft.traits[which(pft.names == "root_sen_dday")]), overwrite = TRUE) + } + + # lvfront, root density at the root apex (cm cm-3) + if ("rootdens_at_apex" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "lvfront", pft.traits[which(pft.names == "rootdens_at_apex")], overwrite = TRUE) + } + + # longsperac - specific 
root length (cm g-1) if ("SRL" %in% pft.names) { srl_val <- PEcAn.utils::ud_convert(pft.traits[which(pft.names == "SRL")], "m", "cm") - plt_list <- plt_list %>% purrr::modify_depth(-1, ~if(all(.x == "@longsperac@")) srl_val else .x) - + SticsRFiles::set_param_xml(plant_file, "longsperac", srl_val, overwrite = TRUE) } + # option to activate the N influence on root partitioning within the soil profile (1 = yes, 2 = no) + SticsRFiles::set_param_xml(plant_file, "codazorac", 1, overwrite = TRUE) + + # reduction factor on root growth when soil mineral N is limiting (< minazorac) + if ("minefnra" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "minefnra", pft.traits[which(pft.names == "minefnra")], overwrite = TRUE) + } + + # mineral N concentration in soil below which root growth is reduced (kg.ha-1.cm-1) + if ("minazorac" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "minazorac", pft.traits[which(pft.names == "minazorac")], overwrite = TRUE) + } + + # mineral N concentration in soil above which root growth is maximum (kg.ha-1.cm-1) + if ("maxazorac" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "maxazorac", pft.traits[which(pft.names == "maxazorac")], overwrite = TRUE) + } + # frost - # water + + # formalism - water + + # psisto, potential of stomatal closing (absolute value) (bars) + # note: units in betyDB are m, but my prior for testing is already in bars + if ("psi_stomata_closure" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "psisto", pft.traits[which(pft.names == "psi_stomata_closure")], overwrite = TRUE) + } + + # psiturg, potential at which cellular extension begins to decrease (absolute value) (bars) + # may or may not be leaf_psi_tlp in betyDB + if ("leaf_psi_tlp" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "psiturg", pft.traits[which(pft.names == "leaf_psi_tlp")], overwrite = TRUE) + } + + # h2ofeuilverte, water content of green leaves (relative to fresh matter) (g g-1) + # may or may not be water_content_TLP_leaf in betyDB + if ("water_content_TLP_leaf" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "h2ofeuilverte", pft.traits[which(pft.names == "water_content_TLP_leaf")], overwrite = TRUE) + } + + # skipping: + # h2ofeuiljaune + # h2otigestruc + # h2ofrvert + # deshydbase + # tempdeshyd + + # kmax, maximum crop coefficient for water requirements (= maximum evapotranspiration / potential evapotranspiration) + if ("crop_water_max" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "kmax", pft.traits[which(pft.names == "crop_water_max")], overwrite = TRUE) + } + # nitrogen + # masecNmax + if ("masecNmax" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "masecNmax", pft.traits[which(pft.names == "masecNmax")], overwrite = TRUE) + } + + # Nreserve + if ("Nreserve" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "Nreserve", pft.traits[which(pft.names == "Nreserve")], overwrite = TRUE) + } + + + # Kmabs1 + if ("Kmabs1" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "Kmabs1", pft.traits[which(pft.names == "Kmabs1")], overwrite = TRUE) + } + + # adil + if ("adil" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "adil", pft.traits[which(pft.names == "adil")], overwrite = TRUE) + } + + # bdil + if ("bdil" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "bdil", pft.traits[which(pft.names == "bdil")], overwrite = TRUE) + } + + # INNmin + if ("INNmin" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "INNmin", pft.traits[which(pft.names == "INNmin")], overwrite = TRUE) + } + + # Nmeta + if 
("Nmeta" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "Nmeta", pft.traits[which(pft.names == "Nmeta")]*100, overwrite = TRUE) + } + # correspondance code BBCH + # cultivar parameters + # values = SticsRFiles::get_param_xml(plant_file, select = "formalisme", select_value = "cultivar parameters") + + # there are multiple cultivars (varietes) in plt file + # for now I assume we will always use only #1 in simulations + # hence, _tec file will always say variete==1, if you change the logic don't forget to update handling of the _tec file accordingly + + # maximal lifespan of an adult leaf expressed in summation of Q10=2 (2**(T-Tbase)) + if ("leaf_lifespan_max" %in% pft.names) { + # this will modifies all varietes' durvieFs by default + SticsRFiles::set_param_xml(plant_file, "durvieF", pft.traits[which(pft.names == "leaf_lifespan_max")], overwrite = TRUE) + # see example for setting a particular (the Grindstad) cultivar param + # SticsRFiles::set_param_xml(plant_file, "durvieF", pft.traits[which(pft.names == "leaf_lifespan_max")], select = "Grindstad", overwrite = TRUE) + } + + # cumulative thermal time between the stages LEV (emergence) and AMF (maximum acceleration of leaf growth, end of juvenile phase) + if ("cum_thermal_juvenile" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "stlevamf", pft.traits[which(pft.names == "cum_thermal_juvenile")], overwrite = TRUE) + } + + # cumulative thermal time between the stages AMF (maximum acceleration of leaf growth, end of juvenile phase) and LAX (maximum leaf area index, end of leaf growth) + if ("cum_thermal_growth" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "stamflax", pft.traits[which(pft.names == "cum_thermal_growth")], overwrite = TRUE) + } + + # cumulative thermal time between the stages LEV (emergence) and DRP (starting date of filling of harvested organs) + if ("cum_thermal_filling" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "stlevdrp", pft.traits[which(pft.names == "cum_thermal_filling")], overwrite = TRUE) + } + + if ("adens" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "adens", pft.traits[which(pft.names == "adens")], overwrite = TRUE) + } + + if ("croirac" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "croirac", pft.traits[which(pft.names == "croirac")], overwrite = TRUE) + } + + # extinction coefficient connecting LAI to crop height + if ("LAI2height" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "khaut", pft.traits[which(pft.names == "LAI2height")], overwrite = TRUE) + } + + # average root radius + if ("rayon" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "rayon", pft.traits[which(pft.names == "rayon")], overwrite = TRUE) + } - # write back + # minimal value for drought stress index + if ("swfacmin" %in% pft.names) { + SticsRFiles::set_param_xml(plant_file, "swfacmin", pft.traits[which(pft.names == "swfacmin")], overwrite = TRUE) + } + + # convert xml2txt if(names(trait.values)[pft] != "env"){ + SticsRFiles::convert_xml2txt(file = plant_file) + # do I also need to move the file out of the plant folder to main rundir? 
+ } + + this_usm <- grep(names(trait.values)[pft], usmdirs) + sapply(this_usm, function(x){ + file.copy(file.path(rundir, "ficplt1.txt"), file.path(usmdirs[x], "ficplt1.txt"), overwrite = TRUE) + }) + + } # pft-loop ends + + + + ############################## Param gen / newform #################################### + + ## these files contain plant parameters as well as soil parameters + ## at the moment everything is treated as params, but some could be IC or come from the events file + + # these parameters won't change as the crop changes in a continuous rotation + soil.names <- names(soil_params) + + for (pft in seq_along(trait.values)) { + + if(names(trait.values)[pft] == "env"){ + next + } + + gen_xml <- XML::xmlParse(system.file("param_gen.xml", package = "PEcAn.STICS")) + gen_file <- file.path(rundir, "param_gen.xml") + XML::saveXML(gen_xml, file = gen_file) + codeinitprec <- ifelse(length(usmdirs) > 1, 1, 2) + SticsRFiles::set_param_xml(gen_file, "codeinitprec", codeinitprec, overwrite = TRUE) + + newf_xml <- XML::xmlParse(system.file("param_newform.xml", package = "PEcAn.STICS")) + newf_file <- file.path(rundir, "param_newform.xml") + XML::saveXML(newf_xml, file = newf_file) + + + pft.traits <- unlist(trait.values[[pft]]) + pft.names <- names(pft.traits) + + ### Shoot growth + # parameter defining radiation effect on conversion efficiency + if ("rad_on_conversion_eff" %in% pft.names) { + SticsRFiles::set_param_xml(gen_file, "coefb", pft.traits[which(pft.names == "rad_on_conversion_eff")], overwrite = TRUE) + } + + # ratio of root mass to aerial mass at harvest + if ("root2aerial_harvest" %in% pft.names) { + SticsRFiles::set_param_xml(gen_file, "proprac", pft.traits[which(pft.names == "root2aerial_harvest")], overwrite = TRUE) + } + + # minimal amount of root mass at harvest (when aerial biomass is nil) t.ha-1 + if ("rootmin_harvest" %in% pft.names) { + SticsRFiles::set_param_xml(gen_file, "y0msrac", pft.traits[which(pft.names == "rootmin_harvest")], overwrite = TRUE) + } + + ### Root growth + + # bulk density of soil below which root growth is reduced due to a lack of soil cohesion (g.cm-3) + if ("bd_rootgrowth_reduced" %in% pft.names) { + SticsRFiles::set_param_xml(gen_file, "dacohes", pft.traits[which(pft.names == "bd_rootgrowth_reduced")], overwrite = TRUE) + } + + # bulk density of soil above which root growth is maximal (g.cm-3) + if ("bd_rootgrowth_maximal" %in% pft.names) { + SticsRFiles::set_param_xml(gen_file, "daseuilbas", pft.traits[which(pft.names == "bd_rootgrowth_maximal")], overwrite = TRUE) + } + + # bulk density of soil above which root growth becomes impossible (g.cm-3) + if ("bd_rootgrowth_impossible" %in% pft.names) { + SticsRFiles::set_param_xml(gen_file, "daseuilhaut", pft.traits[which(pft.names == "bd_rootgrowth_impossible")], overwrite = TRUE) + } + + ### Water absorption and nitrogen content of the plant + + # parameter of increase of maximal transpiration when a water stress occurs + if ("maxTPincrease_waterstress" %in% pft.names) { + SticsRFiles::set_param_xml(gen_file, "beta", pft.traits[which(pft.names == "maxTPincrease_waterstress")], overwrite = TRUE) + } + + # root length density (RLD) above which water and N uptake are maximum and independent of RLD + if ("lvopt" %in% pft.names) { + SticsRFiles::set_param_xml(gen_file, "lvopt", pft.traits[which(pft.names == "lvopt")], overwrite = TRUE) + } + + # diffusion coefficient of nitrate N in soil at field capacity + if ("difN_FC" %in% soil.names) { + SticsRFiles::set_param_xml(gen_file, "difN", soil_params[which(soil.names == 
"difN_FC")], overwrite = TRUE) + } + + # skipping + # concrr: inorganic N concentration (NH4+NO3-N) in the rain + + # minimal amount of rain required to start an automatic fertilisation (N mm.d-1) + if ("plNmin" %in% soil.names) { + SticsRFiles::set_param_xml(gen_file, "plNmin", soil_params[which(soil.names == "plNmin")], overwrite = TRUE) + } + + # skipping, irrlev: + # amount of irrigation applied automatically on the sowing day to allow germination when the model calculates automaticaly + # the amount of irrigations or when the irrigation dates are calculated by sum of temperature + + # minimal amount of N in the plant required to compute INN (kg.ha-1) + if ("QNpltminINN" %in% pft.names) { + SticsRFiles::set_param_xml(gen_file, "QNpltminINN", pft.traits[which(pft.names == "QNpltminINN")], overwrite = TRUE) + } + + ### Soil C and N processes and fertiliser losses + + # minimal temperature for decomposition of humified organic matter (degreeC) + if ("tmin_mineralisation" %in% soil.names) { + SticsRFiles::set_param_xml(gen_file, "tmin_mineralisation", soil_params[which(soil.names == "tmin_mineralisation")], overwrite = TRUE) + } + + # parameter (1/2) of the temperature function on humus decomposition rate + if ("T_p1_Hdecomp_rate" %in% soil.names) { + SticsRFiles::set_param_xml(gen_file, "FTEMh", soil_params[which(soil.names == "T_p1_Hdecomp_rate")], overwrite = TRUE) + } + + # parameter (2/2) of the temperature function on humus decomposition rate + if ("T_p2_Hdecomp_rate" %in% soil.names) { + SticsRFiles::set_param_xml(gen_file, "FTEMha", soil_params[which(soil.names == "T_p2_Hdecomp_rate")], overwrite = TRUE) + } + + # reference temperature for decomposition of humified organic matter + if ("T_r_HOMdecomp" %in% soil.names) { + SticsRFiles::set_param_xml(gen_file, "TREFh", soil_params[which(soil.names == "T_r_HOMdecomp")], overwrite = TRUE) + } + + # parameter (1/2) of the temperature function on decomposition rate of organic residues + if ("FTEMr" %in% soil.names) { + SticsRFiles::set_param_xml(gen_file, "FTEMr", soil_params[which(soil.names == "FTEMr")], overwrite = TRUE) + } + + # parameter (2/2) of the temperature function on decomposition rate of organic residues + if ("FTEMra" %in% soil.names) { + SticsRFiles::set_param_xml(gen_file, "FTEMra", soil_params[which(soil.names == "FTEMra")], overwrite = TRUE) + } + + # reference temperature for decomposition of organic residues + if ("T_r_ORdecomp" %in% soil.names) { + SticsRFiles::set_param_xml(gen_file, "TREFr", soil_params[which(soil.names == "T_r_ORdecomp")], overwrite = TRUE) + } + + # TODO: come back to these + # # not used anymore, or at least not with this name!!! + # # relative potential mineralization rate: K2 = fmin1 * exp(- fmin2*argi) / (1+fmin3*calc) + # if ("FMIN1" %in% soil.names) { + # SticsRFiles::set_param_xml(gen_file, "FMIN1", soil_params[which(soil.names == "FMIN1")], overwrite = TRUE) + # } + # + # # not used anymore, or at least not with this name!!! + # # parameter defining the effect of clay on the potential mineralization rate: K2 = fmin1 * exp(-fmin2*argi) / (1+fmin3*calc) + # if ("FMIN2" %in% soil.names) { + # SticsRFiles::set_param_xml(gen_file, "FMIN2", soil_params[which(soil.names == "FMIN2")], overwrite = TRUE) + # } + # + # # not used anymore, or at least not with this name!!! 
+ # # parameter defining the effect of CaCO3 on the potential mineralization rate: K2 = fmin1 * exp(-fmin2*argi) / (1+fmin3*calc) + # if ("FMIN3" %in% soil.names) { + # SticsRFiles::set_param_xml(gen_file, "FMIN3", soil_params[which(soil.names == "FMIN3")], overwrite = TRUE) + # } + + # N/C ratio of soil humus + if ("Wh" %in% soil.names) { + SticsRFiles::set_param_xml(gen_file, "Wh", soil_params[which(soil.names == "Wh")], overwrite = TRUE) + } + + # soil pH below which NH3 volatilisation derived from fertiliser is nil + if ("pHminvol" %in% soil.names) { + SticsRFiles::set_param_xml(gen_file, "pHminvol", soil_params[which(soil.names == "pHminvol")], overwrite = TRUE) + } + + # soil pH above which NH3 volatilisation derived from fertiliser is maximum + if ("pHmaxvol" %in% soil.names) { + SticsRFiles::set_param_xml(gen_file, "pHmaxvol", soil_params[which(soil.names == "pHmaxvol")], overwrite = TRUE) + } + + # N uptake rate at which fertilizer loss is divided by 2 + if ("Nupt_fertloss_halve" %in% soil.names) { + SticsRFiles::set_param_xml(gen_file, "Vabs2", soil_params[which(soil.names == "Nupt_fertloss_halve")], overwrite = TRUE) + } + + # maximal amount of N immobilised in soil derived from the mineral fertilizer + if ("maxNimm_mineralfert" %in% soil.names) { + SticsRFiles::set_param_xml(gen_file, "Xorgmax", soil_params[which(soil.names == "maxNimm_mineralfert")], overwrite = TRUE) + } + + # relative water content (fraction of field capacity) below which mineralisation rate is nil + if ("hminm" %in% soil.names) { + SticsRFiles::set_param_xml(gen_file, "hminm", soil_params[which(soil.names == "hminm")], overwrite = TRUE) + } + + # relative water content (fraction of field capacity) above which mineralisation rate is maximum + if ("hoptm" %in% soil.names) { + SticsRFiles::set_param_xml(gen_file, "hoptm", soil_params[which(soil.names == "hoptm")], overwrite = TRUE) + } + + # skipping, alphaph: + # maximal soil pH variation per unit of inorganic N added with slurry - XML::saveXML(PEcAn.settings::listToXml(plt_list, "fichierplt"), - file = file.path(pltdir, paste0(names(trait.values)[pft], "_plt.xml")), - prefix = '\n') + # skipping, dphvolmax: + # maximal pH increase following the application of slurry + + # skipping, phvols: + # parameter used to calculate the variation of soil pH after the addition of slurry + + # relative soil mineralisation rate at water saturation + if ("fhminsat" %in% soil.names) { + SticsRFiles::set_param_xml(gen_file, "fhminsat", soil_params[which(soil.names == "fhminsat")], overwrite = TRUE) + } + # reduction factor of decomposition rate of organic residues when mineral N is limiting + if ("Nlim_reductionOMdecomp" %in% soil.names) { + SticsRFiles::set_param_xml(gen_file, "fredkN", soil_params[which(soil.names == "Nlim_reductionOMdecomp")], overwrite = TRUE) } - } # pft-loop ends + # reduction factor of decomposition rate of microbial biomass when mineral N is limiting + if ("Nlim_reductionMBdecomp" %in% soil.names) { + SticsRFiles::set_param_xml(gen_file, "fredlN", soil_params[which(soil.names == "Nlim_reductionMBdecomp")], overwrite = TRUE) + } + + # minimal value for the ratio N/C of the microbial biomass when N limits decomposition + if ("fNCbiomin" %in% soil.names) { + SticsRFiles::set_param_xml(gen_file, "fNCbiomin", soil_params[which(soil.names == "fNCbiomin")], overwrite = TRUE) + } + + # additional reduction factor of residues decomposition rate when mineral N is very limited in soil + if ("fredNsup" %in% soil.names) { + SticsRFiles::set_param_xml(gen_file, 
"fredNsup", soil_params[which(soil.names == "fredNsup")], overwrite = TRUE) + } + + # maximum priming ratio (relative to SOM decomposition SD rate) + if ("Primingmax" %in% soil.names) { + SticsRFiles::set_param_xml(gen_file, "Primingmax", soil_params[which(soil.names == "Primingmax")], overwrite = TRUE) + } + + ### Nitrification, denitrification and associated N2O emissions + ### TODO: modify these params + + ### Soil hydrology and compaction + + # minimal amount of rain required to produce runoff (mm.d-1) + if ("precmin4runoff" %in% soil.names) { + SticsRFiles::set_param_xml(gen_file, "pminruis", soil_params[which(soil.names == "precmin4runoff")], overwrite = TRUE) + } + + # soil thermal diffusivity (cm2.s-1) + if ("soil_thermal_diffusivity" %in% soil.names) { + SticsRFiles::set_param_xml(gen_file, "diftherm", soil_params[which(soil.names == "soil_thermal_diffusivity")], overwrite = TRUE) + } + + # skipping, bformnappe: + # coefficient for the water table shape (artificially drained soil) + + # drain radius (cm) + if ("rdrain" %in% soil.names) { + SticsRFiles::set_param_xml(gen_file, "rdrain", soil_params[which(soil.names == "rdrain")], overwrite = TRUE) + } + + # soil water potential corresponding to wilting point (Mpa) + if ("SWP_WP" %in% soil.names) { + SticsRFiles::set_param_xml(gen_file, "psihumin", soil_params[which(soil.names == "SWP_WP")], overwrite = TRUE) + } + + # soil water potential corresponding to field capacity (Mpa) + if ("SWP_FC" %in% soil.names) { + SticsRFiles::set_param_xml(gen_file, "psihucc", soil_params[which(soil.names == "SWP_FC")], overwrite = TRUE) + } + + # soil moisture content (fraction of field capacity) above which compaction may occur and delay sowing + if ("SMC_compaction_delay_sow" %in% pft.names) { + SticsRFiles::set_param_xml(gen_file, "prophumtasssem", pft.traits[which(pft.names == "SMC_compaction_delay_sow")], overwrite = TRUE) + } + + # soil moisture content (fraction of field capacity) above which compaction may occur and delay harvest + if ("SMC_compaction_delay_harvest" %in% pft.names) { + SticsRFiles::set_param_xml(gen_file, "prophumtassrec", pft.traits[which(pft.names == "SMC_compaction_delay_harvest")], overwrite = TRUE) + } + + ### skipping + ### Soil tillage if soil compaction activated + + ### Typology of pebbles fertilisers and residues + ### should some of these parameters come from event files? 
+ + ### codetypeng: Types of mineral fertilisers - 1 atm + # 1: Ammonium.nitrate + # 2: Urea.Ammonium.Nitrate.solution + # 3: Urea + # 4: Anhydrous.ammonia + # 5: Ammonium.sulphate + # 6: Ammonium.phosphate + # 7: Calcium.nitrate + # 8: Fixed.efficiency + + # each option has 4 params + # engamm: fraction of ammonium in the N fertilizer + # orgeng: maximal amount of fertilizer N that can be immobilized in the soil (fraction for type 8) + # deneng: maximal fraction of the mineral fertilizer that can be denitrified (used if codedenit is not activated) + # voleng: maximal fraction of mineral fertilizer that can be volatilized + + ### codetypres: Type of residues for decomposition parameters - 21 atm + # 1: Main crop on surface + # 2: Intermediate crop on surface + # 3: Manure on surface + # 4: Green compost on surface + # 5: Sewage sludge on surface + # 6: Vinasse on surface + # 7: Horn on surface + # 8: Grapevine shoots on surface + # 9: Others.1 on surface + # 10: Others.2 on surface + # 11: Main crop ploughed in + # 12: Intermediate crop ploughed in + # 13: Manure ploughed in + # 14: Green compost ploughed in + # 15: Sewage sludge ploughed in + # 16: Vinasse ploughed in + # 17: Cattle horn ploughed in + # 18: Grapevine shoots ploughed in + # 19: Others.1 ploughed in + # 20: Others.2 ploughed in + # 21: Dead roots in soil + + # each option has 17 params + + # fraction of organic residue which is decomposable + if ("fOR_decomp" %in% pft.names) { + SticsRFiles::set_param_xml(gen_file, "CroCo", pft.traits[which(pft.names == "fOR_decomp")], overwrite = TRUE) + } + + # parameter of organic residues decomposition: kres=akres+bkres/CsurNres + if ("ORdecomp_par" %in% pft.names) { + SticsRFiles::set_param_xml(gen_file, "akres", pft.traits[which(pft.names == "ORdecomp_par")], overwrite = TRUE) + } + + # potential rate of decomposition of organic residues: kres=akres+bkres/CsurNres + if ("ORdecomp_rate" %in% pft.names) { + SticsRFiles::set_param_xml(gen_file, "bkres", pft.traits[which(pft.names == "ORdecomp_rate")], overwrite = TRUE) + } + + # parameter determining C/N ratio of biomass during organic residues decomposition: CsurNbio=awb+bwb/CsurNres + if ("awb" %in% pft.names) { + SticsRFiles::set_param_xml(gen_file, "awb", pft.traits[which(pft.names == "awb")], overwrite = TRUE) + } + + # parameter determining C/N ratio of biomass during organic residues decomposition: CsurNbio=awb+bwb/CsurNres + if ("bwb" %in% pft.names) { + SticsRFiles::set_param_xml(gen_file, "bwb", pft.traits[which(pft.names == "bwb")], overwrite = TRUE) + } + + # minimum ratio C/N of microbial biomass decomposing organic residues + if ("minC2N_microbialbiomass" %in% pft.names) { + SticsRFiles::set_param_xml(gen_file, "cwb", pft.traits[which(pft.names == "minC2N_microbialbiomass")], overwrite = TRUE) + } + + # parameter of organic residues humification: hres = 1 - ahres*CsurNres/(bhres+CsurNres) + if ("ahres" %in% pft.names) { + SticsRFiles::set_param_xml(gen_file, "ahres", pft.traits[which(pft.names == "ahres")], overwrite = TRUE) + } + + # parameter of organic residues humification: hres = 1 - ahres*CsurNres/(bhres+CsurNres) + if ("bhres" %in% pft.names) { + SticsRFiles::set_param_xml(gen_file, "bhres", pft.traits[which(pft.names == "bhres")], overwrite = TRUE) + } + + # TODO: we need a soil PFT + + # potential decay rate of microbial biomass decomposing organic residues + if ("microbialbiomass_decay" %in% pft.names) { + SticsRFiles::set_param_xml(gen_file, "kbio", pft.traits[which(pft.names == "microbialbiomass_decay")], 
overwrite = TRUE) + } + + # Carbon assimilation yield by the microbial biomass during crop residues decomposition + if ("microbialbiomass_C_yield" %in% pft.names) { + SticsRFiles::set_param_xml(gen_file, "yres", pft.traits[which(pft.names == "microbialbiomass_C_yield")], overwrite = TRUE) + } + + # minimum value of C/N ratio of organic residue (g.g-1) + if ("CNresmin" %in% pft.names) { + SticsRFiles::set_param_xml(gen_file, "CNresmin", pft.traits[which(pft.names == "CNresmin")], overwrite = TRUE) + } + + # maximum value of C/N ratio of organic residue (g.g-1) + if ("CNresmax" %in% pft.names) { + SticsRFiles::set_param_xml(gen_file, "CNresmax", pft.traits[which(pft.names == "CNresmax")], overwrite = TRUE) + } + + # skipping, qmulchruis0: + # amount of mulch above which runoff is suppressed + + # skipping, mouillabilmulch: + # maximum wettability of crop mulch + + # skipping, kcouvmlch: + # extinction coefficient connecting the soil cover to the amount of plant mulch + + # skipping, albedomulchresidus: + # albedo of crop mulch + + # skipping, Qmulchdec: + # maximal amount of decomposable mulch + + SticsRFiles::convert_xml2txt(file = gen_file) + + this_usm <- grep(names(trait.values)[pft], usmdirs) + sapply(this_usm, function(x){ + file.copy(file.path(rundir, "tempopar.sti"), file.path(usmdirs[x], "tempopar.sti"), overwrite = TRUE) + }) + + ### new formulations + # DO NOTHING ELSE FOR NOW + + SticsRFiles::convert_xml2txt(file = newf_file) + sapply(this_usm, function(x){ + file.copy(file.path(rundir, "tempoparv6.sti"), file.path(usmdirs[x], "tempoparv6.sti"), overwrite = TRUE) + }) + } @@ -122,34 +1168,158 @@ write.config.STICS <- function(defaults, trait.values, settings, run.id) { # read in template ini file ini_xml <- XML::xmlParse(system.file("pecan_ini.xml", package = "PEcAn.STICS")) - ini_list <- XML::xmlToList(ini_xml) + for(i in seq_along(usmdirs)){ + + # it doesn't really matter what these are called; they will all eventually be 'ficini.txt' + ini_file <- file.path(rundir, paste0(basename(usmdirs[i]), "_ini.xml")) + + # write the ini file + XML::saveXML(ini_xml, file = ini_file) + + # DO NOTHING FOR NOW + # but when you do, note that this also has multiple options, e.g. 
+ # SticsRFiles::set_param_xml(file = ini_file, param = "lai0", values = 1, select = "plante", select_value = "1", overwrite = TRUE) + if(i > 1){ + # these may or may not be modified depending on how crop cycles work in STICS + # 'snu' is bare soil + # fine for annual crops but need to change for perennials + SticsRFiles::set_param_xml(file = ini_file, param = "stade0", values = "snu", select = "plante", select_value = "1", overwrite = TRUE) + # when snu others are set to 0 by STICS + + }else if(!is.null(settings$run$inputs$poolinitcond)){ + ic_path <- settings$run$inputs$poolinitcond$path + ic_nc <- ncdf4::nc_open(ic_path) + + # initial leaf area index (m2 m-2) + lai0 <- ncdf4::ncvar_get(ic_nc, "LAI") + SticsRFiles::set_param_xml(file = ini_file, param = "lai0", values = lai0, select = "plante", select_value = "1", overwrite = TRUE) + + # initial aerial biomass (kg m-2 --> t ha-1) + masec0 <- ncdf4::ncvar_get(ic_nc, "AGB") + SticsRFiles::set_param_xml(file = ini_file, param = "masec0", values = PEcAn.utils::ud_convert(masec0, "kg m-2", "t ha-1"), select = "plante", select_value = "1", overwrite = TRUE) + + # initial depth of root apex of the crop (m --> cm) + zrac0 <- ncdf4::ncvar_get(ic_nc, "rooting_depth") + if(zrac0 < 0.2) zrac0 <- 0.2 + SticsRFiles::set_param_xml(file = ini_file, param = "zrac0", values = PEcAn.utils::ud_convert(zrac0, "m", "cm"), select = "plante", select_value = "1", overwrite = TRUE) + + # initial grain dry weight - haven't started any simulations from this stage yet + # SticsRFiles::set_param_xml(file = ini_file, param = "magrain0", values = 0, select = "plante", select_value = "1", overwrite = TRUE) + + # initial N amount in the plant (kg m-2 --> kg ha-1) + QNplante0 <- ncdf4::ncvar_get(ic_nc, "plant_nitrogen_content") + SticsRFiles::set_param_xml(file = ini_file, param = "QNplante0", values = PEcAn.utils::ud_convert(QNplante0, "kg m-2", "kg ha-1"), select = "plante", select_value = "1", overwrite = TRUE) + + # Not anymore + # initial reserve of biomass (kg m-2 --> t ha-1) + #resperenne0 <- ncdf4::ncvar_get(ic_nc, "reserve_biomass") + #SticsRFiles::set_param_xml(file = ini_file, param = "resperenne0", values = PEcAn.utils::ud_convert(resperenne0, "kg m-2", "t ha-1"), select = "plante", select_value = "1", overwrite = TRUE) + + # initial root density in each of the five soil layers + densinitial <- ncdf4::ncvar_get(ic_nc, "root_density") + if(all(densinitial==0)) densinitial[1] <- 0.5 # for lev + if(zrac0 == 0.2){ + densinitial[2:5] <-0 + }else if(zrac0 < 0.4){ + densinitial[3:5] <-0 + }else if(zrac0 < 0.6){ + densinitial[4:5] <-0 + }else if(zrac0 < 0.8){ + densinitial[5] <-0 #densinitial layers should not be filled if zrac0 is not there + } + SticsRFiles::set_param_xml(file = ini_file, param = "densinitial", values = densinitial, select = "plante", select_value = "1", overwrite = TRUE) + + # default 'lev' + # SticsRFiles::set_param_xml(file = ini_file, param = "stade0", values = "plt", select = "plante", select_value = "1", overwrite = TRUE) + + ncdf4::nc_close(ic_nc) + } + + SticsRFiles::convert_xml2txt(file = ini_file) + file.rename(file.path(rundir, "ficini.txt"), file.path(usmdirs[i], "ficini.txt")) + } + - # DO NOTHING FOR NOW + ############################ Prepare Soils ################################## - # write the ini file - XML::saveXML(PEcAn.settings::listToXml(ini_list, "initialisations"), - file = file.path(rundir, paste0(defaults$pft$name, "_ini.xml")), - prefix = '\n') + ## this is where we modify soil characteristics + #### THERE IS SOME BUG IN 
SticsRFiles::convert_xml2txt FOR SOLS.XML + #### I NOW PUT TXT VERSION TO THE MODEL PACKAGE: param.sol + #### TODO: revise others to have txt templates directly in the package + # # changed from FINERT to finert and moved to the sols.xml + # # initial fraction of soil organic N inactive for mineralisation (= stable SON/ total SON) + # if ("FINERT" %in% soil.names) { + # SticsRFiles::set_param_xml(gen_file, "finert", soil_params[which(soil.names == "FINERT")], overwrite = TRUE) + # } - ############################ Prepare Soils ################################## + sols_file <- file.path(rundir, "param.sol") - ## this is where we modify soil characteristics + # cp template sols file (txt) + file.copy(system.file("param.sol", package = "PEcAn.STICS"), sols_file) - # read in template sols file - sols_xml <- XML::xmlParse(system.file("sols.xml", package = "PEcAn.STICS")) - sols_list <- XML::xmlToList(sols_xml) + # check param names + # sols_vals <- SticsRFiles::get_soil_txt(sols_file) - sols_list$sol$.attrs[["nom"]] <- paste0("sol", defaults$pft$name) + str_ns <- paste0(as.numeric(settings$run$site$id) %/% 1e+09, "-", as.numeric(settings$run$site$id) %% 1e+09) - # DO NOTHING FOR NOW + # I guess not important what this is called as long as it's consistent in usms + SticsRFiles::set_soil_txt(file = sols_file, param="typsol", value=paste0("sol", str_ns)) - # write the tec file - XML::saveXML(PEcAn.settings::listToXml(sols_list, "sols"), - file = file.path(rundir, "sols.xml"), - prefix = '\n') + if(!is.null(settings$run$inputs$poolinitcond)){ + ic_path <- settings$run$inputs$poolinitcond$path + ic_nc <- ncdf4::nc_open(ic_path) + + # pH + pH <- ncdf4::ncvar_get(ic_nc, "pH") + pH <- round(pH[1], digits = 1) # STICS uses 1 pH value + SticsRFiles::set_soil_txt(file = sols_file, param="pH", value=pH) + + sapply(1:5, function(x) SticsRFiles::set_soil_txt(file = sols_file, param="epc", value=20, layer = x)) + + # volume_fraction_of_water_in_soil_at_field_capacity + hccf <- ncdf4::ncvar_get(ic_nc, "volume_fraction_of_water_in_soil_at_field_capacity") + hccf <- round(hccf*100, digits = 2) + sapply(seq_along(hccf), function(x) SticsRFiles::set_soil_txt(file = sols_file, param="hccf", value=hccf[x], layer = x)) + + # volume_fraction_of_condensed_water_in_soil_at_wilting_point + hminf <- ncdf4::ncvar_get(ic_nc, "volume_fraction_of_condensed_water_in_soil_at_wilting_point") + hminf <- round(hminf*100, digits = 2) + sapply(seq_along(hminf), function(x) SticsRFiles::set_soil_txt(file = sols_file, param="hminf", value=hminf[x], layer = x)) + + # soil_organic_nitrogen_content + Norg <- ncdf4::ncvar_get(ic_nc, "soil_organic_nitrogen_content") + Norg <- round(Norg[1]*100, digits = 2) # STICS uses 1 Norg value + SticsRFiles::set_soil_txt(file = sols_file, param="Norg", value=Norg) + + # mass_fraction_of_clay_in_soil + argi <- ncdf4::ncvar_get(ic_nc, "mass_fraction_of_clay_in_soil") + argi <- round(argi[1]*100, digits = 0) # STICS uses 1 argi value + SticsRFiles::set_soil_txt(file = sols_file, param="argi", value=argi) + + # soil_density (kg m-3 --> g cm-3) + DAF <- ncdf4::ncvar_get(ic_nc, "soil_density") + DAF <- round(PEcAn.utils::ud_convert(DAF, "kg m-3", "g cm-3"), digits = 1) + sapply(seq_along(DAF), function(x) SticsRFiles::set_soil_txt(file = sols_file, param="DAF", value=DAF[x], layer = x)) + + # c2n_humus + #CsurNsol0 <- ncdf4::ncvar_get(ic_nc, "c2n_humus") + #SticsRFiles::set_soil_txt(file = sols_file, param="CsurNsol", value=CsurNsol0) + + # epd + epd <- rep(10, 5) + sapply(seq_along(epd), function(x) 
SticsRFiles::set_soil_txt(file = sols_file, param="epd", value=epd[x], layer = x)) + + ncdf4::nc_close(ic_nc) + } + file.copy(sols_file, file.path(usmdirs, "param.sol")) + + # DO NOTHING ELSE FOR NOW + + # this has some bug for sols.xml + # SticsRFiles::convert_xml2txt(file = sols_file, javastics = javastics_path) ######################### Prepare Weather Station File ############################### @@ -157,207 +1327,369 @@ write.config.STICS <- function(defaults, trait.values, settings, run.id) { # read in template sta file sta_xml <- XML::xmlParse(system.file("pecan_sta.xml", package = "PEcAn.STICS")) - sta_list <- XML::xmlToList(sta_xml) - - # change latitute - sta_list[[1]][[3]]$text <- settings$run$site$lat - # DO NOTHING ELSE FOR NOW + # not important what it's called, will be 'station.txt' in the end + sta_file <- file.path(rundir, paste0(str_ns, "_sta.xml")) - # Should these be prepared by met2model.STICS? + XML::saveXML(sta_xml, file = sta_file) - # write the sta file - XML::saveXML(PEcAn.settings::listToXml(sta_list, "fichiersta"), - file = file.path(rundir, paste0(tolower(sub(" .*", "", settings$run$site$name)), "_sta.xml")), - prefix = '\n') + # change latitude + SticsRFiles::set_param_xml(sta_file, "latitude", settings$run$site$lat, overwrite = TRUE) - + SticsRFiles::convert_xml2txt(file = sta_file) + file.copy(file.path(rundir, "station.txt"), file.path(usmdirs, "station.txt")) + # another way to change latitute + # sta_txt <- file.path(rundir, "station.txt") + # SticsRFiles::set_station_txt(sta_txt, param = "latitude", value = settings$run$site$lat) + # DO NOTHING ELSE FOR NOW + # Should these be prepared by met2model.STICS? + ############################## Prepare LAI forcing #################################### ## skipping for now - ############################## Param gen / newform #################################### - - ## DO NOTHING - gen_xml <- XML::xmlParse(system.file("param_gen.xml", package = "PEcAn.STICS")) - gen_list <- XML::xmlToList(gen_xml) - XML::saveXML(PEcAn.settings::listToXml(gen_list, "fichierpar"), - file = file.path(rundir, "param_gen.xml"), - prefix = '\n') - - newf_xml <- XML::xmlParse(system.file("param_newform.xml", package = "PEcAn.STICS")) - newf_list <- XML::xmlToList(newf_xml) - XML::saveXML(PEcAn.settings::listToXml(newf_list, "fichierparamgen"), - file = file.path(rundir, "param_newform.xml"), - prefix = '\n') ############################ Prepare Technical File ################################## ## this is where we modify management practices + ## TODO: use ICASA compatible json file + + ## instead of using a template, this could be easier if we prepare a dataframe and use SticsRFiles::gen_tec_xml + tec_df <- data.frame(Tec_name = "tmp_tec.xml") + + # these shouldn't be empty even if we don't use them (values from timothy example in STICS) + tec_df$iplt0 <- 999 # date of sowing + tec_df$profsem <- 2 # depth of sowing + tec_df$densitesem <- 100 # plant sowing density + tec_df$variete <- 1 # cultivar number corresponding to the cultivar name in the plant file (could be passed via a field activity file) + tec_df$irecbutoir <- 999 #latest date of harvest (imposed if the crop cycle is not finished at this date) + tec_df$profmes <- 120 # depth of measurement of the soil water reserve (cm) + #tec_df$engrais <- 1 # fertilizer type + tec_df$concirr <- 0.11 # concentration of mineral N in irrigation water (kg ha-1 mm-1) + tec_df$ressuite <- 'straw+roots' # type of crop residue + tec_df$h2ograinmax <- 0.32 # maximal water content of fruits at harvest + 
+ # the following formalisms exist in the tec file: + ## supply of organic residues + ## soil tillage + ## sowing + ## phenological stages + ## irrigation + ## fertilisation + ## harvest + ## special techniques + ## soil modification by techniques (compaction-fragmentation) + + # if a field activity file is given, most (all?) of our harvest cases actually fall under special techniques - cut crop + if(!is.null(settings$run$inputs$fielddata)){ + + events_file <- jsonlite::read_json(settings$run$inputs$fielddata$path, simplifyVector = TRUE)[[1]] + # loop for each USM + for(usmi in seq_along(usmdirs)){ + + usm_years <- c(sapply(strsplit(sub(".*_", "", basename(usmdirs[usmi])), "-"), function(x) (as.numeric(x)))) + # note that usm years can overlap, may need more sophisticated checks + dseq_sub <- dseq[lubridate::year(dseq) %in% usm_years] + + events_sub <- events_file$events[lubridate::year(events_file$events$date) %in% usm_years, ] + + if("planting" %in% events_sub$mgmt_operations_event){ + + pl_date <- events_sub$date[events_sub$mgmt_operations_event == "planting"] + tec_df$iplt0 <- lubridate::yday(as.Date(pl_date)) + + profsem <- events_sub$planting_depth[events_sub$mgmt_operations_event == "planting"] + if(!is.null(profsem)){ + tec_df$profsem <- as.numeric(profsem) # depth of sowing + } + + densitesem <- events_sub$planting_sowing_density[events_sub$mgmt_operations_event == "planting"] + if(!is.null(densitesem)){ + tec_df$densitesem <- as.numeric(densitesem) # plant sowing density + } + + # any other? + } + + # initialise these so that the cbind() further below also works when one of the event types is absent + harvest_tec <- NULL + fert_tec <- NULL + + if("harvest" %in% events_sub$mgmt_operations_event){ + # param names + h_param_names <- c("julfauche" , # date of each cut for forage crops, julian.d + "hautcoupe" , # cut height for forage crops, m + "lairesiduel", # residual LAI after each cut of forage crop, m2 m-2 + "msresiduel" , # residual aerial biomass after a cut of a forage crop, t.ha-1 + "anitcoupe", # amount of mineral N added by fertiliser application at each cut of a forage crop, kg.ha-1 + "engraiscoupe", + "tauxexportfauche", + "restit", + "mscoupemini") # minimum amount of aerial biomass required to make a cut, t.ha-1 + + + harvest_sub <- events_sub[events_sub$mgmt_operations_event == "harvest",] + + harvest_list <- list() + for(hrow in seq_len(nrow(harvest_sub))){ + + # empty, one column per name in h_param_names + harvest_df <- data.frame(julfauche = NA, hautcoupe = NA, lairesiduel = NA, msresiduel = NA, anitcoupe = NA, engraiscoupe = NA, tauxexportfauche = NA, restit = NA, mscoupemini = NA) + + + # If given harvest date is within simulation days + # probably need to break down >2 years into multiple usms + if(as.Date(harvest_sub$date[hrow]) %in% dseq_sub){ + + # STICS needs cutting days in cumulative julian days + # e.g. 
the first cutting day of the first simulation year keeps its day of year, 164 for 2018-06-13 + # in following years it should be cumulative, meaning a cutting day on 2019-06-12 becomes 528, not 163 + # the following code should give that + harvest_df$julfauche <- which(dseq_sub == as.Date(harvest_sub$date[hrow])) + lubridate::yday(dseq_sub[1]) - 1 + if("frg" %in% tolower(harvest_sub$harvest_crop) | + "wcl" %in% tolower(harvest_sub$harvest_crop)){ + tec_df$irecbutoir <- 999 + if(!is.null(events_file$rotation)){ + tind <- which(dseq_sub == as.Date(events_file$rotation$rotation_end[usmi])) + lubridate::yday(dseq_sub[1]) - 1 + tec_df$irecbutoir <- ifelse(length(tind) == 0, 999, tind) + } + }else{ + tec_df$irecbutoir <- harvest_df$julfauche + } + harvest_df$hautcoupe <- as.numeric(harvest_sub$harvest_cut_height[harvest_sub$date==harvest_sub$date[hrow]]) # cut height for forage crops (m) + harvest_df$hautcoupe <- ifelse(harvest_df$hautcoupe == -99, 0.05, harvest_df$hautcoupe) + harvest_df$lairesiduel <- ifelse(harvest_df$hautcoupe < 0.08, 0.2, 0.8) # hardcode for now + harvest_df$msresiduel <- ifelse(harvest_df$hautcoupe < 0.08, 0.05, 0.3) # residual aerial biomass after a cut of a forage crop (t ha-1) + harvest_df$anitcoupe <- 21 # amount of mineral N added by fertiliser application at each cut of a forage crop (kg ha-1) + harvest_df$engraiscoupe <- 0 + harvest_df$tauxexportfauche <- 0 + harvest_df$restit <- 0 + harvest_df$mscoupemini <- 0 + } + + colnames(harvest_df) <- paste0(h_param_names, "_", hrow) + harvest_list[[hrow]] <- harvest_df + } + harvest_tec <- do.call("cbind", harvest_list) + + # need to get these from field data + # cut crop - 1:yes, 2:no + if("frg" %in% tolower(harvest_sub$harvest_crop) | "wcl" %in% tolower(harvest_sub$harvest_crop)){ + harvest_tec$codefauche <- 1 + }else{ + harvest_tec$codefauche <- 2 + } + #harvest_tec$mscoupemini <- 0 # min val of aerial biomass to make a cut + harvest_tec$codemodfauche <- 2 # use calendar days + harvest_tec$hautcoupedefaut <- 0.05 # cut height for forage crops (calendar calculated) + harvest_tec$stadecoupedf <- "rec" + + + } #harvest-if end + + if("organic_material" %in% events_sub$mgmt_operations_event | + "fertilizer" %in% events_sub$mgmt_operations_event){ + # param names + f_param_names <- c("julapN", # date of fertilization, julian.d + "absolute_value/%") # amount of mineral N applied, kg.ha-1 + + + fert_sub <- events_sub[events_sub$mgmt_operations_event %in% c("organic_material", "fertilizer"),] + + fert_list <- list() + for(frow in seq_len(nrow(fert_sub))){ + + # empty + fert_df <- data.frame(jul = NA, val = NA) + + # If given fertilization date is within simulation days + if(as.Date(fert_sub$date[frow]) %in% dseq_sub){ + + fert_df$jul <- which(dseq_sub == as.Date(fert_sub$date[frow])) + lubridate::yday(dseq_sub[1]) - 1 + + if(fert_sub$mgmt_operations_event[frow] == "organic_material"){ + Nprcnt <- ifelse(as.numeric(fert_sub$organic_material_N_conc[frow]) < 0, 5, as.numeric(fert_sub$organic_material_N_conc[frow])) + fert_df$val <- as.numeric(fert_sub$org_material_applic_amnt[frow]) * (Nprcnt/100) + }else{ + fert_df$val <- as.numeric(fert_sub$N_in_applied_fertilizer[frow]) + } + + } + + colnames(fert_df) <- paste0(f_param_names, "_", frow) + fert_list[[frow]] <- fert_df + } + fert_tec <- do.call("cbind", fert_list) + } #fertilizer-if end + + + # DO NOTHING ELSE FOR NOW + # TODO: ADD OTHER MANAGEMENT + + # same usm -> continue columns + usm_tec_df <- cbind(tec_df, harvest_tec, fert_tec) + + usm_tec_df$ratiol <- 0 + + SticsRFiles::gen_tec_xml(param_df = usm_tec_df, + 
file=system.file("pecan_tec.xml", package = "PEcAn.STICS"), + out_dir = usmdirs[usmi]) + + # TODO: more than 1 USM, rbind + + SticsRFiles::convert_xml2txt(file = file.path(usmdirs[usmi], "tmp_tec.xml")) + + + } # end-loop over usms + } # TODO: if no events file is given modify other harvest parameters, e.g. harvest decision - # read in template tec file - tec_xml <- XML::xmlParse(system.file("pecan_tec.xml", package = "PEcAn.STICS")) - tec_list <- XML::xmlToList(tec_xml) + ################################ Prepare USM file ###################################### + + # loop for each USM + #ncodesuite <- ifelse(length(usmdirs) > 1, 1,0) - # If harvest file is given, use given dates - # this will need more complicated checks and file formats - if(!is.null(settings$run$inputs$harvest)){ + for(usmi in seq_along(usmdirs)){ - h_days <- as.matrix(utils::read.table(settings$run$inputs$harvest$path, header = TRUE, sep = ",")) + #usm_years <- years_requested[(usmi*2-1):(usmi*2)] + usm_years <- c(sapply(strsplit(sub(".*_", "", basename(usmdirs[usmi])), "-"), function(x) (as.numeric(x)))) + dseq_sub <- dseq[lubridate::year(dseq) %in% usm_years] - # probably should use nicer list manipulation techniques, but these template files are static for now - # save last list to add at the end - attr_sublist <- tec_list[[8]][[1]][[1]][[2]][[2]][[1]][[4]] - tec_list[[8]][[1]][[1]][[2]][[2]][[1]][[4]] <- NULL + # read in template USM (Unit of SiMulation) file, has the master settings, file names etc. + usm_file <- file.path(usmdirs[usmi], "new_travail.usm") - list_no <- 2 + # cp template usm file + file.copy(system.file("template.usm", package = "PEcAn.STICS"), usm_file) - for(hrow in seq_len(nrow(h_days))){ - - - harvest_list <- tec_list[[8]][[1]][[1]][[2]][[2]][[1]][[2]] # refreshing the "template" - intervention_names <- names(tec_list[[8]][[1]][[1]][[2]][[2]][[1]][[2]]) - - # If given harvest date is within simulation days - if(as.Date(h_days[hrow, 2], origin = paste0(h_days[hrow, 1], "-01-01")) %in% dseq){ - - # STICS needs cutting days in cumulative julian days - # e.g. 
first cutting day of the first simulation year can be 163 (2018-06-13) - # in following years it should be cumulative, meaning a cutting day on 2019-06-12 is 527, not 162 - # the following code should give that - harvest_list$colonne$text <- which(dseq == as.Date(h_days[hrow, 2], origin = paste0(h_days[hrow, 1], "-01-01"))) + lubridate::yday(dseq[1]) - 2 - - tec_list[[8]][[1]][[1]][[2]][[2]][[1]][[list_no]] <- harvest_list - - list_no <- list_no + 1 - } + # Type of LAI simulation + # 0 = culture (LAI calculated by the model), 1 = feuille (LAI forced) + SticsRFiles::set_usm_txt(usm_file, "codesimul", "culture", append = FALSE) # hardcode for now + + # use optimization + # 0 = no; 1 = yes main plant; 2 = yes associated plant + SticsRFiles::set_usm_txt(usm_file, "codeoptim", 0, append = FALSE) + + # option to simulate several + # successive USM (0 = no, 1 = yes) + if(usmi == 1){ + SticsRFiles::set_usm_txt(usm_file, "codesuite", 0, append = FALSE) + }else{ + SticsRFiles::set_usm_txt(usm_file, "codesuite", 1, append = FALSE) + } + + # number of simulated plants (sole crop=1; intercropping=2) + SticsRFiles::set_usm_txt(usm_file, "nbplantes", 1, append = FALSE) # hardcode for now + + # pft name + SticsRFiles::set_usm_txt(usm_file, "nom", basename(usmdirs[usmi]), append = FALSE) + + + ## handle dates, also for partial year(s) + ## needs developing with longer runs + if(usmi == 1){ + # beginning day of the simulation (julian.d) + # end day of the simulation (julian.d) (at the end of consecutive years, i.e. can be greater than 366) + SticsRFiles::set_usm_txt(usm_file, "datedebut", lubridate::yday(settings$run$start.date), append = FALSE) + SticsRFiles::set_usm_txt(usm_file, "datefin", (lubridate::yday(settings$run$start.date) + length(dseq_sub) - 1), append = FALSE) + }else{ + SticsRFiles::set_usm_txt(usm_file, "datedebut", 1, append = FALSE) # for now! + SticsRFiles::set_usm_txt(usm_file, "datefin", length(dseq_sub), append = FALSE) } - # this means we have prescribed more than 2 cutting days - if(list_no > 4){ - names(tec_list[[8]][[1]][[1]][[2]][[2]][[1]])[3:(list_no-1)] <- "intervention" - - #add the last sublist back - attr_sublist["nb_interventions"] <- list_no - 2 - tec_list[[8]][[1]][[1]][[2]][[2]][[1]][[".attrs"]] <- attr_sublist - + # name of the initialization file + SticsRFiles::set_usm_txt(usm_file, "finit", paste0(basename(usmdirs[usmi]), "_ini.xml"), append = FALSE) + + # soil number + SticsRFiles::set_usm_txt(usm_file, "numsol", 1, append = FALSE) + + # name of the soil in the sols.xml file + SticsRFiles::set_usm_txt(usm_file, "nomsol", paste0("sol", str_ns), append = FALSE) + + # name of the weather station file + SticsRFiles::set_usm_txt(usm_file, "fstation", paste0(str_ns, "_sta.xml"), append = FALSE) + + # name of the first climate file + SticsRFiles::set_usm_txt(usm_file, "fclim1", paste0(str_ns, ".", usm_years[1]), append = FALSE) + + # name of the last climate file + if(length(usm_years) == 2){ + SticsRFiles::set_usm_txt(usm_file, "fclim2", paste0(str_ns, ".", usm_years[2]), append = FALSE) + }else{ + # repeat same year + SticsRFiles::set_usm_txt(usm_file, "fclim2", paste0(str_ns, ".", usm_years[1]), append = FALSE) } - + + # number of simulation years + SticsRFiles::set_usm_txt(usm_file, "nbans", length(unique(usm_years)), append = FALSE) # hardcode for now + + # number of calendar years involved in the crop cycle + # 1 = 1 year e.g. for spring crops, 0 = two years, e.g. 
for winter crops + culturean <- ifelse( length(unique(usm_years)) == 2, 0, 1) + SticsRFiles::set_usm_txt(usm_file, "culturean", culturean, append = FALSE) #hardcoding this for now, if passed as a trait from priors it breaks sensitivity analysis + # probably best to pass this via the json file + + # name of the plant file for main plant + if(length(plt_files) < usmi){ + # multiple usms, 1 plt file = same spp, consecutive rotations, but hacky + SticsRFiles::set_usm_txt(usm_file, "fplt1", basename(plt_files[[1]]), append = FALSE) + }else{ + SticsRFiles::set_usm_txt(usm_file, "fplt1", basename(plt_files[[usmi]]), append = FALSE) + } + + + # name of the technical file for main plant + # does this even matter? + SticsRFiles::set_usm_txt(usm_file, "ftec1", "tmp_tec.xml", append = FALSE) + + # name of the LAI forcing file for main plant (null if none) + SticsRFiles::set_usm_txt(usm_file, "flai1", "default.lai", append = FALSE) # hardcode for now, doesn't matter when codesimul==0 + + # TODO: more than 1 PFTs + # STICS can run 2 PFTs max: main crop + intercrop } - - # OTHERWISE DO NOTHING FOR NOW - - # write the tec file - XML::saveXML(PEcAn.settings::listToXml(tec_list, "fichiertec"), - file = file.path(rundir, paste0(defaults$pft$name, "_tec.xml")), - prefix = '\n') - - - - - ################################ Prepare USM file ###################################### - # TODO: more than 1 USM and PFTs (STICS can run 2 PFTs max: main crop + intercrop) - - # pft name - usm_list$usm$.attrs[["nom"]] <- defaults$pft$name - - # beginning day of the simulation (julian.d) - usm_list$usm$datedebut <- lubridate::yday(settings$run$start.date) - - # end day of the simulation (julian.d) (at the end of consecutive years, i.e. can be greater than 366) - usm_list$usm$datefin <- usm_list$usm$datedebut + length(dseq) - 1 - - # name of the initialization file - usm_list$usm$finit <- paste0(defaults$pft$name, "_ini.xml") - - # name of the soil in the sols.xml file - usm_list$usm$nomsol <- paste0("sol", defaults$pft$name) - - # name of the weather station file - usm_list$usm$fstation <- paste0(tolower(sub(" .*", "", settings$run$site$name)), "_sta.xml") - - # name of the first climate file - usm_list$usm$fclim1 <- paste0(tolower(sub(" .*", "", settings$run$site$name)), ".", lubridate::year(settings$run$start.date)) - - - # name of the last climate file - usm_list$usm$fclim2 <- paste0(tolower(sub(" .*", "", settings$run$site$name)), ".", lubridate::year(settings$run$end.date)) - - # number of calendar years involved in the crop cycle - # 1 = 1 year e.g. for spring crops, 0 = two years, e.g. 
for winter crops - usm_list$usm$culturean <- trait.values$timothy$crop_cycle - - # number of simulated plants (sole crop=1; intercropping=2) - usm_list$usm$nbplantes <- 1 # hardcode for now - - # Type of LAI simulation - # 0 = culture (LAI calculated by the model), 1 = feuille (LAI forced) - usm_list$usm$codesimul <- 0 # hardcode for now - - # name of the plant file for main plant - usm_list[[1]][[11]]$fplt <- paste0(defaults$pft$name, "_plt.xml") - - # name of the technical file for main plant - usm_list[[1]][[11]]$ftec <- paste0(defaults$pft$name, "_tec.xml") - - # name of the LAI forcing file for main plant (null if none) - usm_list[[1]][[11]]$flai <- "null" # hardcode for now - - # name of the plant file for associated plant (intercropping) - usm_list[[1]][[12]]$fplt <- "null" # hardcode for now - - # name of the technical file for associated plant (intercropping) - usm_list[[1]][[12]]$ftec <- "null" # hardcode for now - - # name of the LAI forcing file for associated plant (intercropping) (null if none) - usm_list[[1]][[12]]$flai <- "null" # hardcode for now - - # write USMs - XML::saveXML(PEcAn.settings::listToXml(usm_list, "usms"), - file = file.path(rundir, "usms.xml"), - prefix = '\n') + ################################ Prepare Run ###################################### # symlink climate files met_path <- settings$run$inputs$met$path - for(clim in seq(lubridate::year(settings$run$start.date), lubridate::year(settings$run$end.date))){ - met_file <- gsub(paste0(lubridate::year(settings$run$start.date), ".climate"), paste0(clim, ".climate"), met_path) - clim_file <- file.path(rundir, paste0(tolower(sub(" .*", "", settings$run$site$name)), ".", clim)) - file.symlink(met_file, clim_file) + + for(usmi in seq_along(usmdirs)){ + + usm_years <- c(sapply(strsplit(sub(".*_", "", basename(usmdirs)[usmi]), "-"), function(x) (as.numeric(x)))) + dseq_sub <- dseq[lubridate::year(dseq) %in% usm_years] + + clim_list <- list() # temporary solution + for(clim in seq_along(usm_years)){ + # currently assuming only first year file has been passed to the settings, modify met2model if changing the logic + met_file <- gsub(paste0(lubridate::year(settings$run$start.date), ".climate"), paste0(usm_years[clim], ".climate"), met_path) + clim_list[[clim]] <- utils::read.table(met_file) + } + clim_run <- do.call("rbind", clim_list) + utils::write.table(clim_run, file.path(usmdirs[usmi], "climat.txt"), col.names = FALSE, row.names = FALSE) + } - - # stics path - stics_path <- settings$model$binary # symlink to binary file.symlink(stics_path, bindir) + stics_exe <- file.path(bindir, basename(stics_path)) - # generate STICS input files using JavaStics - jexe <- file.path(gsub("bin","", dirname(stics_path)), "JavaSticsCmd.exe") - - usm_name <- defaults$pft$name - - cmd_generate <- paste("java -jar", jexe,"--generate-txt", rundir, usm_name) + # symlink *.mod files + file.symlink(system.file("var.mod", package = "PEcAn.STICS"), file.path(usmdirs, "var.mod")) + file.symlink(system.file("rap.mod", package = "PEcAn.STICS"), file.path(usmdirs, "rap.mod")) + file.symlink(system.file("prof.mod", package = "PEcAn.STICS"), file.path(usmdirs, "prof.mod")) - # copy *.mod files - mod_files <- c(file.path(gsub("bin","example", dirname(stics_path)), "var.mod"), - file.path(gsub("bin","example", dirname(stics_path)), "rap.mod"), - file.path(gsub("bin","example", dirname(stics_path)), "prof.mod")) - file.copy(mod_files, rundir) + #cmd_run <- paste("java -jar", jexe,"--run", rundir, usm_name) - cmd_run <- paste("java -jar", 
jexe,"--run", rundir, usm_name) + # using SticsOnR wrapper in job.sh now - SticsOnR::stics_wrapper(model_options = wrapper_options) + # used to be: + # cmd_generate <- paste("java -jar", jexe,"--generate-txt", rundir, usm_name) + # cmd_run <- paste("java -jar", jexe,"--run", rundir, usm_name) - #----------------------------------------------------------------------- # create launch script (which will create symlink) if (!is.null(settings$model$jobtemplate) && file.exists(settings$model$jobtemplate)) { @@ -396,10 +1728,16 @@ write.config.STICS <- function(defaults, trait.values, settings, run.id) { jobsh <- gsub("@OUTDIR@", outdir, jobsh) jobsh <- gsub("@RUNDIR@", rundir, jobsh) - jobsh <- gsub("@MODFILE@", paste0("mod_s", usm_name, ".sti"), jobsh) + if(length(usmdirs)>1){ + jobsh <- gsub("@SUCCESSIVE_USMS@", paste0("list(c('", paste(basename(usmdirs), collapse="','"), "'))"), jobsh) + }else{ + jobsh <- gsub("@SUCCESSIVE_USMS@", 'NULL', jobsh) + } + + jobsh <- gsub("@USMDIR@", usmdirs[1], jobsh) # for now - jobsh <- gsub("@CMD_GENERATE@", cmd_generate, jobsh) - jobsh <- gsub("@CMD_RUN@", cmd_run, jobsh) + jobsh <- gsub("@MODFILE@", paste0("mod_s", basename(usmdirs[1]), ".sti"), jobsh) + jobsh <- gsub("@STICSEXE@", stics_exe, jobsh) writeLines(jobsh, con = file.path(settings$rundir, run.id, "job.sh")) Sys.chmod(file.path(settings$rundir, run.id, "job.sh")) diff --git a/models/stics/inst/crop_plt.xml b/models/stics/inst/crop_plt.xml index a0596782441..5e06c48af0b 100644 --- a/models/stics/inst/crop_plt.xml +++ b/models/stics/inst/crop_plt.xml @@ -3,7 +3,7 @@ - + fou @@ -56,7 +58,6 @@ 10.00000 - -999 -999 -999 @@ -78,7 +86,6 @@ 0.00000 - 0 200.00000 - 140.00000 0.00000 - 0.02000 - 0.70000 0.00000 25.00000 30.00000 @@ -131,24 +136,20 @@ 0.0000 @@ -161,11 +162,8 @@ - 1 20 - 10 15 2.6 2.7 @@ -186,25 +182,57 @@ 25.00000 - 377.000 246.000 - 0.35500 0.00000 0.00000 + + + + 0.879 @@ -237,21 +259,23 @@ - 0.00000 - 0.00000 - 0.01757 - 0.01100 - 0.55000 0.00000 rec 0.00000 0.30000 + 0.02 + + + @@ -294,21 +344,18 @@ 2 - -6.00000 -20.00000 @@ -321,14 +368,11 @@ - 13.9 - 2.2 0.90000 0.15000 0.60000 0.70000 0.40000 - 0.00800 0.00500 - @@ -68,7 +68,6 @@ 0.08150 0.20000 0.70000 - 0.70000 0.70000 @@ -78,9 +77,7 @@ 1.40000 0.50000 - 0.02000 0.04500 - 0.02000 10.00000 20.00000 0.00000 @@ -103,10 +100,13 @@ 0.10300 12.00000 15.00000 - 0.65000 - 0.00060 - 0.02720 - 0.01670 + 0.0007 + 0.02519 + 0.015 + 0.11200 + 8.50000 + 0.06000 + 11.00000 0.10500 5.50000 8.50000 @@ -136,7 +136,7 @@ 47 25 148 - 3.0 + 3.0 - + + - - + + 0.00100 + 0.10000 + + + + + - - - - - - - - - - - - - - - + + + + + + + + + + + + 1 + + + - - - - - - - + + + + + diff --git a/models/stics/inst/pecan_ini.xml b/models/stics/inst/pecan_ini.xml index b487c804f27..2ead14bfc07 100644 --- a/models/stics/inst/pecan_ini.xml +++ b/models/stics/inst/pecan_ini.xml @@ -1,59 +1,86 @@ - - 1 - - lev - 0.0000 + + 1 + + lev + 0.0000 + 50.0000 + - - snu - + 1.0000 + + + 0.0000 + + 30.0000 + 15.0000 + 0.0000 + 0.0000 + 0.0000 + + + + snu + + + - - - 0.0000 - 0.0000 - 0.0000 - 0.0000 - 0.0000 - - - 10.0000 - 5.0000 - 5.0000 - 0.0000 - 0.0000 - - - 10.0000 - 5.0000 - 5.0000 - 0.0000 - 0.0000 - - + + + + + + + + + + + + + + + 0.0000 + 0.0000 + 0.0000 + 0.0000 + 0.0000 + + + 10.0000 + 5.0000 + 5.0000 + 0.0000 + 0.0000 + + + 10.0000 + 5.0000 + 5.0000 + 0.0000 + 0.0000 + + + + 0.0 + 0.0 + 0.0 + 0.0 + + diff --git a/models/stics/inst/pecan_sta.xml b/models/stics/inst/pecan_sta.xml index a10b4f2773c..01cd3f9c2a5 100644 --- a/models/stics/inst/pecan_sta.xml +++ 
b/models/stics/inst/pecan_sta.xml @@ -1,65 +1,86 @@ - - - 2.50000 - 0.00000 - 46.50000 - 1000.00000 - 20.000000 - - - + + + 50.00000 + 0.23000 + 0.18000 + 0.62000 + 1.00000 + + + + + 0.70000 + 6.00000 + 0.50000 + 0.16000 + 0.00400 + 0.59000 + + + + + diff --git a/models/stics/inst/pecan_tec.xml b/models/stics/inst/pecan_tec.xml index c333c76f133..c9bfc287a25 100644 --- a/models/stics/inst/pecan_tec.xml +++ b/models/stics/inst/pecan_tec.xml @@ -1,319 +1,361 @@ - - + + - - - - - - - - - - - - - - - - - - - - - - 999 - 2.00000 - 100.00000 - 1 - - - - + + + + + + + + + + + + + + + + + + + + + 0 + 0.00 + 6.00 + + + + + 0 + 5.50000 + 450 + 1 + + + + - 999 - - + + 999 + 442 + 999 + 999 + 999 + 999 + 999 + 999 + 999 + + + + + 1.00000 - - - - 1 - 0.11000 - - + + + + 20.00000 + + + 0.00000 + + + + + + + + + + 180 + 80 + 1 + + + + + + 245 + straw+roots + + + - - - + + + + + + + + - - - + + 1.10000 + 1.30000 + 0.00500 + 0.05000 + + + + + + + + diff --git a/models/stics/inst/prof.mod b/models/stics/inst/prof.mod new file mode 100644 index 00000000000..e0b00c6fe59 --- /dev/null +++ b/models/stics/inst/prof.mod @@ -0,0 +1,4 @@ +2 +Chum +10 +01 01 2000 diff --git a/models/stics/inst/rap.mod b/models/stics/inst/rap.mod new file mode 100644 index 00000000000..d64396c2bac --- /dev/null +++ b/models/stics/inst/rap.mod @@ -0,0 +1,15 @@ +1 +1 +2 +1 +rec +masec(n) +mafruit +iflos +imats +irecs +laimax +cprecip +cet +QNplante +Qles diff --git a/models/stics/inst/sols.xml b/models/stics/inst/sols.xml index fa606e236e3..0a0e3d5d0d2 100644 --- a/models/stics/inst/sols.xml +++ b/models/stics/inst/sols.xml @@ -1,121 +1,122 @@ - - - - 17.0 - 0.1500 - 15.0000 - 1.0000 - 6.5000 - 0.0100 - 0.2000 - 4.0000 - 0.0000 - 200.0000 - 50.0000 - 0.5000 - 60.0000 - 5.0000 - 0.0100 - 0.0000 - 0.3300 - - - - - - - - - - - - - - - - - - - 20.00 - 16.00 - 4.00 - 1.20 - 0.00 - 1 - 50.00 - 5 - - - 20.00 - 15.30 - 9.30 - 1.30 - 0.00 - 1 - 50.00 - 2 - - - 60.00 - 15.30 - 9.30 - 1.30 - 0.00 - 1 - 50.00 - 1 - - - 0.00 - 0.00 - 0.00 - 0.00 - 0.00 - 1 - 50.00 - 1 - - - 0.00 - 0.00 - 0.00 - 0.00 - 0.00 - 1 - 50.00 - 1 - + + + + 30.2 + 0.2700 + 40.0000 + 0.0000 + 7.0000 + 0.2000 + 0.1700 + 12.0000 + 0.0000 + 200.0000 + 50.0000 + 0.5000 + 60.0000 + 5.0000 + 0.0100 + 0.0000 + 0.65000 + 0.3300 + + + + + + + + + + + + + + + + + + + 20.00 + 46.80 + 26.20 + 1.08 + 0.00 + 1 + 50.00 + 10 + + + 20.00 + 46.40 + 27.40 + 1.09 + 0.00 + 1 + 50.00 + 10 + + + 20.00 + 48.50 + 29.10 + 1.02 + 0.00 + 1 + 50.00 + 10 + + + 20.00 + 50.10 + 25.50 + 0.99 + 0.00 + 1 + 50.00 + 10 + + + 20.00 + 50.10 + 25.50 + 0.99 + 0.00 + 1 + 50.00 + 10 + diff --git a/models/stics/inst/template.job b/models/stics/inst/template.job index bf96e69685c..5f539f194be 100644 --- a/models/stics/inst/template.job +++ b/models/stics/inst/template.job @@ -15,11 +15,18 @@ if [ ! -e "@OUTDIR@/@MODFILE@" ]; then cd "@RUNDIR@" - @CMD_GENERATE@ - @CMD_RUN@ + # call stics_wrapper + echo " + javastics_path = '@RUNDIR@' + stics_exe = '@STICSEXE@' + sticsrun_dir = '@RUNDIR@' + successive_usms = @SUCCESSIVE_USMS@ + wrapper_options = SticsOnR::stics_wrapper_options(stics_exe = stics_exe, workspace = sticsrun_dir, successive = successive_usms) + SticsOnR::stics_wrapper(model_options = wrapper_options) + " | R --vanilla # copy log - mv "@RUNDIR@/stics_errors.log" "@OUTDIR@" + cp "@USMDIR@/stics_errors.log" "@OUTDIR@" STATUS=$? @@ -30,7 +37,9 @@ if [ ! 
-e "@OUTDIR@/@MODFILE@" ]; then fi # copy output - mv @RUNDIR@/*.sti @OUTDIR@ + mv @RUNDIR@/**/mod_b* @OUTDIR@ + mv @RUNDIR@/**/mod_s* @OUTDIR@ + mv @RUNDIR@/**/modh* @OUTDIR@ # convert to MsTMIP echo "library (PEcAn.STICS) diff --git a/models/stics/inst/template.usm b/models/stics/inst/template.usm new file mode 100644 index 00000000000..a2aa436effe --- /dev/null +++ b/models/stics/inst/template.usm @@ -0,0 +1,36 @@ +:codesimul +codesimul_placeholder +:codeoptim +codeoptim_placeholder +:codesuite +codesuite_placeholder +:nbplantes +nbplantes_placeholder +:nom +nom_placeholder +:datedebut +datedebut_placeholder +:datefin +datefin_placeholder +:finit +finit_placeholder +:numsol +numsol_placeholder +:nomsol +nomsol_placeholder +:fstation +fstation_placeholder +:fclim1 +fclim1_placeholder +:fclim2 +fclim2_placeholder +:nbans +nbans_placeholder +:culturean +culturean_placeholder +:fplt1 +fplt1_placeholder +:ftec1 +ftec1_placeholder +:flai1 +flai1_placeholder diff --git a/models/stics/inst/usms.xml b/models/stics/inst/usms.xml index 74792d30b5c..5b927fadb77 100644 --- a/models/stics/inst/usms.xml +++ b/models/stics/inst/usms.xml @@ -1,16 +1,16 @@ - - - + + + @DATEBUT@ @DATEFIN@ - @FINIT@ + @FINIT@ @NOMSOL@ @FSTATION@ @FCLIM1@ - @FCLIM2@ + @FCLIM2@ @CULTUREAN@ @NBPLANTES@ - @CODESIMUL@ + @CODESIMUL@ @FPLT1@ @FTEC1@ @@ -20,6 +20,6 @@ @FPLT2@ @FTEC2@ @FLAI2@ - - + + diff --git a/models/stics/inst/var.mod b/models/stics/inst/var.mod new file mode 100644 index 00000000000..92a6e03b620 --- /dev/null +++ b/models/stics/inst/var.mod @@ -0,0 +1,50 @@ +lai(n) +masec(n) +masec_kg_ha +mafruit +HR(1) +HR(2) +HR(3) +HR(4) +HR(5) +resmes +drain +esol +et +zrac +tcult +AZnit(1) +AZnit(2) +AZnit(3) +AZnit(4) +AZnit(5) +Qles +QNplante +CNplante +azomes +inn +chargefruit +AZamm(1) +AZamm(2) +AZamm(3) +AZamm(4) +AZamm(5) +CNgrain +concNO3les +drat +fapar +hauteur +Hmax +humidite +LRACH(1) +LRACH(2) +LRACH(3) +LRACH(4) +LRACH(5) +mafrais +pdsfruitfrais +Qdrain +rnet +CO2sol +dltams(n) +dltaremobil diff --git a/models/stics/tests/testthat.R b/models/stics/tests/testthat.R index b7618603b85..0f26d96ee97 100644 --- a/models/stics/tests/testthat.R +++ b/models/stics/tests/testthat.R @@ -1,11 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- library(testthat) library(PEcAn.utils) diff --git a/models/template/DESCRIPTION b/models/template/DESCRIPTION index ea29b9eed5a..7e0aea20f21 100644 --- a/models/template/DESCRIPTION +++ b/models/template/DESCRIPTION @@ -1,8 +1,7 @@ Package: PEcAn.ModelName Type: Package Title: PEcAn Package for Integration of the ModelName Model -Version: 1.7.2 -Date: 2021-10-04 +Version: 1.8.0.9000 Authors@R: c(person("Jane", "Doe", role = c("aut", "cre"), email = "jdoe@illinois.edu"), person("John", "Doe", role = c("aut"))) @@ -20,4 +19,4 @@ Copyright: Authors LazyLoad: yes LazyData: FALSE Encoding: UTF-8 -RoxygenNote: 7.2.3 +RoxygenNote: 7.3.2 diff --git a/models/template/LICENSE b/models/template/LICENSE index 5a9e44128f1..09ef35a60b4 100644 --- a/models/template/LICENSE +++ b/models/template/LICENSE @@ -1,34 +1,3 @@ -## This is the master copy of the PEcAn License - -University of Illinois/NCSA Open Source License - -Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - -PEcAn project -www.pecanproject.org - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal with the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -- Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. -- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. -- Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - +YEAR: 2024 +COPYRIGHT HOLDER: PEcAn Project +ORGANIZATION: PEcAn Project, authors affiliations diff --git a/models/template/NEWS.md b/models/template/NEWS.md new file mode 100644 index 00000000000..113ba150e18 --- /dev/null +++ b/models/template/NEWS.md @@ -0,0 +1,7 @@ +# PEcAn.ModelName 1.8.0.9000 + +## License change +* PEcAn.ModelName is now distributed under the BSD three-clause license instead of the NCSA Open Source license. + +## Added +* Added a `NEWS.md` file to track changes to the package. Prior to this point changes are tracked in the main CHANGELOG for the PEcAn repository. 
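# --- illustrative aside, not part of the patch ---
# The LICENSE rewrite above follows R's template-license convention:
# DESCRIPTION declares "BSD_3_clause + file LICENSE" and the LICENSE file
# then carries only the template fields (YEAR, COPYRIGHT HOLDER,
# ORGANIZATION) instead of the full license text. A minimal sketch for
# sanity-checking that pairing from a package root:
lic <- read.dcf("DESCRIPTION", fields = "License")[1, "License"]
stopifnot(identical(lic, "BSD_3_clause + file LICENSE"))
readLines("LICENSE")  # expect exactly the three template fields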
diff --git a/models/template/R/met2model.MODEL.R b/models/template/R/met2model.MODEL.R index 7c15426e085..2d3f7012e33 100644 --- a/models/template/R/met2model.MODEL.R +++ b/models/template/R/met2model.MODEL.R @@ -1,13 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -##-------------------------------------------------------------------------------------------------# ##' Converts a met CF file to a model specific met file. The input ##' files are calld /.YYYY.cf ##' @@ -25,7 +15,7 @@ met2model.MODEL <- function(in.path, in.prefix, outfolder, overwrite = FALSE) { PEcAn.logger::logger.severe("NOT IMPLEMENTED") # Please follow the PEcAn style guide: - # https://pecanproject.github.io/pecan-documentation/master/coding-style.html + # https://pecanproject.github.io/pecan-documentation/latest/coding-style.html # Note that `library()` calls should _never_ appear here; instead, put # packages dependencies in the DESCRIPTION file, under "Imports:". diff --git a/models/template/R/model2netcdf.MODEL.R b/models/template/R/model2netcdf.MODEL.R index 230deb181da..2fce61d8925 100644 --- a/models/template/R/model2netcdf.MODEL.R +++ b/models/template/R/model2netcdf.MODEL.R @@ -1,13 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -##-------------------------------------------------------------------------------------------------# ##' Convert MODEL output into the NACP Intercomparison format (ALMA using netCDF) ##' ##' @name model2netcdf.MODEL diff --git a/models/template/R/version.R b/models/template/R/version.R new file mode 100644 index 00000000000..0e58d885272 --- /dev/null +++ b/models/template/R/version.R @@ -0,0 +1,3 @@ +# Set at package install time, used by pecan.all::pecan_version() +# to identify development versions of packages +.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown") diff --git a/models/template/R/write.config.MODEL.R b/models/template/R/write.config.MODEL.R index 674eda0b710..6f2dbc70ed0 100644 --- a/models/template/R/write.config.MODEL.R +++ b/models/template/R/write.config.MODEL.R @@ -1,13 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- - -##-------------------------------------------------------------------------------------------------# ##' Writes a MODEL config file. ##' ##' Requires a pft xml object, a list of trait values for a single model run, diff --git a/models/template/inst/template_geo.job b/models/template/inst/template_geo.job index 026e7e29db9..19dcb7d8bdd 100644 --- a/models/template/inst/template_geo.job +++ b/models/template/inst/template_geo.job @@ -42,5 +42,13 @@ cp "@RUNDIR@/README.txt" "@OUTDIR@/README.txt" # host specific teardown @HOST_TEARDOWN@ +#copy files back. +@CPRUNCMD@ +@CPOUTCMD@ + +#delete files in the run and out folder. +@RMRUNDIRCMD@ +@RMOUTDIRCMD@ + # all done -echo -e "MODEL FINISHED\nLogfile is located at '@OUTDIR@/logfile.txt'" >&3 +echo -e "MODEL FINISHED\nLogfile is located at '@OUTDIR@/logfile.txt'" >&3 \ No newline at end of file diff --git a/models/template/tests/testthat.R b/models/template/tests/testthat.R index d93798b4ffe..f44dabc6ffb 100644 --- a/models/template/tests/testthat.R +++ b/models/template/tests/testthat.R @@ -1,11 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- library(testthat) library(PEcAn.utils) diff --git a/modex_libs b/modex_libs deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/modules/allometry/DESCRIPTION b/modules/allometry/DESCRIPTION index cf4f31f3d5a..c031dc02766 100644 --- a/modules/allometry/DESCRIPTION +++ b/modules/allometry/DESCRIPTION @@ -1,8 +1,7 @@ Package: PEcAn.allometry Type: Package Title: PEcAn Allometry Functions -Version: 1.7.2 -Date: 2021-10-04 +Version: 1.7.3.9000 Authors@R: c(person("Mike", "Dietze", role = c("aut", "cre"), email = "dietze@bu.edu"), person("Shashank", "Singh", role = c("ctb")), @@ -20,9 +19,9 @@ Imports: tools, PEcAn.DB Suggests: - knitr, + knitr (>= 1.42), markdown, - rmarkdown, + rmarkdown (>= 2.19), testthat (>= 1.0.2), withr License: BSD_3_clause + file LICENSE @@ -30,5 +29,5 @@ Copyright: Authors LazyLoad: yes LazyData: FALSE Encoding: UTF-8 -RoxygenNote: 7.2.3 -VignetteBuilder: knitr +RoxygenNote: 7.3.2 +VignetteBuilder: knitr, rmarkdown diff --git a/modules/allometry/LICENSE b/modules/allometry/LICENSE index 9e38c2dc685..09ef35a60b4 100644 --- a/modules/allometry/LICENSE +++ b/modules/allometry/LICENSE @@ -1,29 +1,3 @@ -University of Illinois/NCSA Open Source License - -Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. 
- -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal with the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -- Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. -- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. -- Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - +YEAR: 2024 +COPYRIGHT HOLDER: PEcAn Project +ORGANIZATION: PEcAn Project, authors affiliations diff --git a/modules/allometry/NEWS.md b/modules/allometry/NEWS.md new file mode 100644 index 00000000000..d0bcb027c0d --- /dev/null +++ b/modules/allometry/NEWS.md @@ -0,0 +1,10 @@ +# PEcAn.allometry 1.7.2.9000 + +## License change +* PEcAn.allometry is now distributed under the BSD three-clause license instead of the NCSA Open Source license. + + +# PEcAn.allometry 1.7.1 + +* All changes in 1.7.1 and earlier were recorded in a single file for all of the PEcAn packages; please see +https://github.com/PecanProject/pecan/blob/v1.7.1/CHANGELOG.md for details. diff --git a/modules/allometry/R/AllomAve.R b/modules/allometry/R/AllomAve.R index 38690cf6f91..357da3edb19 100644 --- a/modules/allometry/R/AllomAve.R +++ b/modules/allometry/R/AllomAve.R @@ -1,22 +1,25 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- -#' @title AllomAve -#' @name AllomAve -#' @aliases AllomAve +#' AllomAve +#' +#' Allometry wrapper function that handles loading and subsetting the data, +#' fitting the Bayesian models, and generating diagnostic figures. Set up to loop over +#' multiple PFTs and components. +#' Writes raw MCMC and PDF of diagnositcs to file and returns table of summary stats. +#' +#' There are two usages of this function. +#' When running 'online' (connected to the PEcAn database), pass the database connection, +#' con, and the pfts subsection of the PEcAn settings. 
+#' When running 'stand alone' pass the pft list mapping species to species codes +#' and the file paths to the allometry table and field data (optional) +#' #' @param pfts pft list from PEcAn settings (if con) OR list of pft spcd's #' If the latter, the names within the list are used to identify PFTs -#' \itemize{ +#' \describe{ #' \item{'acronym'}{ - USDA species acronyms (see plants.usda.gov), used with FIELD data (vector)} #' \item{'spcd'}{ - USFS species codes, use with PARM data (vector)} #' } -#' @param components IDs for allometry components from Jenkins et al 2004 Table 5. Default is stem biomass (6). See data(allom.components) +#' @param components IDs for allometry components from Jenkins et al 2004 Table 5. +#' Default is stem biomass (6). See data(allom.components) #' @param outdir output directory files are written to. Default is getwd() #' @param con database connection #' @param field path(s) to raw data files @@ -27,14 +30,6 @@ #' @param dmax maximum dbh of interest #' @return nested list of parameter summary statistics #' @export -#' @description allometery wrapper function that handles loading and subsetting the data, -#' fitting the Bayesian models, and generating diagnostic figures. Set up to loop over -#' multiple PFTs and components. -#' Writes raw MCMC and PDF of diagnositcs to file and returns table of summary stats. -#' -#' @details There are two usages of this function. -#' When running 'online' (connected to the PEcAn database), pass the database connection, con, and the pfts subsection of the PEcAn settings. -#' When running 'stand alone' pass the pft list mapping species to species codes and the file paths to the allometry table and field data (optional) #' #' @examples #' @@ -170,7 +165,7 @@ AllomAve <- function(pfts, components = 6, outdir = NULL, con = NULL, field = NU beta <- allom.stats[[pft.name]][[component]]$statistics[, "Mean"] y.0 <- exp(beta["mu0"] + beta["mu1"] * log(dseq)) y.g <- exp(beta["Bg0"] + beta["Bg1"] * log(dseq)) - y.o <- predict.allom.orig(dseq, allom$parm[ntally, ]) + y.o <- predict_allom_orig(dseq, allom$parm[ntally, ]) graphics::lines(dseq, y.0, lwd = 2, col = 1) graphics::lines(dseq, y.g, lwd = 2, col = 2) for (i in seq_len(nrow(y.o))) { @@ -192,7 +187,7 @@ AllomAve <- function(pfts, components = 6, outdir = NULL, con = NULL, field = NU return(allom.stats) } # AllomAve -predict.allom.orig <- function(x, parm) { +predict_allom_orig <- function(x, parm) { out <- matrix(NA, nrow(parm), length(x)) @@ -262,4 +257,4 @@ predict.allom.orig <- function(x, parm) { } return(out) -} # predict.allom.orig +} # predict_allom_orig diff --git a/modules/allometry/R/allom.BayesFit.R b/modules/allometry/R/allom.BayesFit.R index beeb35ea1d4..91e92022435 100644 --- a/modules/allometry/R/allom.BayesFit.R +++ b/modules/allometry/R/allom.BayesFit.R @@ -1,41 +1,36 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2015 University of Illinois, NCSA. -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- -#' @title allom.BayesFit -#' @name allom.BayesFit -#' @aliases allom.BayesFit +#' allom.BayesFit #' -#' @description Module to fit a common power-law allometric model +#' Module to fit a common power-law allometric model #' to a mixture of raw data and allometric equations #' in a Heirarchical Bayes framework with multiple imputation #' of the allometric data #' +#' dependencies: requires MCMCpack and mvtnorm +#' +#' note: runs 1 chain, but multiple chains can be simulated by +#' multiple function calls +#' #' @param allom - object (usually generated by query.allom.data) which #' needs to be a list with two entries: #' 'field' - contains a list, each entry for which is #' a data frame with 'x' and 'y'. Can be NULL #' 'parm' - a single data frame with the following components: -#' \itemize{ -#' \item{n} {sample size} -#' \item{a} {eqn coefficient} -#' \item{b} {eqn coefficient} -#' \item{c} {eqn coefficient} -#' \item{d} {eqn coefficient} -#' \item{e} {eqn coefficient} -#' \item{se} {standard error} -#' \item{eqn} {sample size} -#' \item{Xmin} {smallest tree sampled (cm)} -#' \item{Xmax} {largest tree sampled (cm)} -#' \item{Xcor} {units correction on X} -#' \item{Ycor} {units correction on Y} -#' \item{Xtype} {type of measurement on the X} -#' \item{spp} { - USFS species code} +#' \describe{ +#' \item{n}{sample size} +#' \item{a}{eqn coefficient} +#' \item{b}{eqn coefficient} +#' \item{c}{eqn coefficient} +#' \item{d}{eqn coefficient} +#' \item{e}{eqn coefficient} +#' \item{se}{standard error} +#' \item{eqn}{sample size} +#' \item{Xmin}{smallest tree sampled (cm)} +#' \item{Xmax}{largest tree sampled (cm)} +#' \item{Xcor}{units correction on X} +#' \item{Ycor}{units correction on Y} +#' \item{Xtype}{type of measurement on the X} +#' \item{spp}{ - USFS species code} #' } #' @param nrep - number of MCMC replicates #' @@ -43,11 +38,6 @@ #' @param dmin minimum dbh of interest #' @param dmax maximum dbh of interest - -#' @details dependencies: requires MCMCpack and mvtnorm -#' -#' note: runs 1 chain, but multiple chains can be simulated by -#' multiple function calls #' #' @return returns MCMC chain and ONE instance of 'data' #' note: in many cases the estimates are multiply imputed diff --git a/modules/allometry/R/allom.predict.R b/modules/allometry/R/allom.predict.R index 16f6210919f..ea94f83526f 100644 --- a/modules/allometry/R/allom.predict.R +++ b/modules/allometry/R/allom.predict.R @@ -1,33 +1,27 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2015 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- -#' @title allom.predict -#' @name allom.predict -#' @aliases allom.predict +#' allom.predict +#' +#' Function for making tree-level Monte Carlo predictions +#' from allometric equations estimated from the PEcAn allometry module #' #' @param object Allometry model object. 
Option includes -#'\itemize{ +#'\describe{ #' \item{'list of mcmc'}{ - mcmc outputs in a list by PFT then component} #' \item{'vector of file paths'}{ - path(s) to AllomAve RData files} #' \item{'directory where files are located}{ - } #' } #' @param dbh Diameter at Breast Height (cm) -#' @param pft Plant Functional Type. Needs to match the name used in AllomAve. Can be NULL if only one PFT/species exists, otherwise needs to the same length as dbh +#' @param pft Plant Functional Type. Needs to match the name used in AllomAve. +#' Can be NULL if only one PFT/species exists, otherwise needs to the same length as dbh #' @param component Which component to predict. Can be NULL if only one component was analysed in AllomAve. #' @param n Number of Monte Carlo samples. Defaults to the same number as in the MCMC object #' @param use c('Bg','mu','best') #' @param interval c('none','confidence','prediction') default is prediction +#' @param single.tree logical: Is this a DBH time series from one individual tree? +#' If TRUE, will use a fixed error for all draws. #' #' @return matrix of Monte Carlo predictions that has n rows and one column per DBH #' -#' @description Function for making tree-level Monte Carlo predictions -#' from allometric equations estimated from the PEcAn allometry module #' #' @examples #' @@ -240,19 +234,18 @@ allom.predict <- function(object, dbh, pft = NULL, component = NULL, n = NULL, u return(out) } # allom.predict -#' @title load.allom -#' @name load.allom +#' load.allom +#' +#' loads allom files #' #' @param object Allometry model object. Option includes -#'\itemize{ +#'\describe{ #' \item{'vector of file paths'}{ - path(s) to AllomAve RData files} #' \item{'directory where files are located}{ - } #' } #' #' @return mcmc outputs in a list by PFT then component #' -#' @description loads allom files -#' #' @examples #' #' \dontrun{ diff --git a/modules/allometry/R/query.allom.data.R b/modules/allometry/R/query.allom.data.R index f5942adf8e2..26eb0ed8aac 100644 --- a/modules/allometry/R/query.allom.data.R +++ b/modules/allometry/R/query.allom.data.R @@ -1,21 +1,14 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- -#' @title query.allom.data -#' @name query.allom.data -#' @description +#' query.allom.data +#' #' Module to grab allometric information from the raw data table #' Will grab both original field data and tallied equations #' #' Tallied equation format based on Jenkins et al 2004 USFS #' General Technical Report NE-319 #' +#' database is assumed to conform to the PEcAn Schema +#' #' @author Michael Dietze #' #' @param pft_name name of Plant Functional Type to be queried @@ -23,7 +16,6 @@ #' @param con open database connection #' @param nsim number of pseudo-data simulations for estimating SE #' -#' @details database is assumed to conform to the PEcAn Schema query.allom.data <- function(pft_name, variable, con, nsim = 10000) { ## check validity of inputs @@ -80,21 +72,22 @@ query.allom.data <- function(pft_name, variable, con, nsim = 10000) { return(allom) } # query.allom.data -#' @title nu -#' @name nu +#' nu +#' +#' converts factors to numeric +#' #' @param x data -#' @description converts factors to numeric nu <- function(x) { as.numeric(as.character(x)) } # nu -#' @title AllomUnitCoef -#' @name AllomUnitCoef -#' @param x units: mm, cm, cm2, m, in, g, kg, lb, Mg -#' @param tp diameter type, leave NULL if DBH. Options: 'd.b.h.^2','cbh','crc' -#' @description +#' AllomUnitCoef +#' #' converts length units FROM cm TO specified units #' converts mass units TO kg FROM specificed units +#' +#' @param x units: mm, cm, cm2, m, in, g, kg, lb, Mg +#' @param tp diameter type, leave NULL if DBH. Options: 'd.b.h.^2','cbh','crc' AllomUnitCoef <- function(x, tp = NULL) { y <- rep(1, length(x)) diff --git a/modules/allometry/R/read.allom.data.R b/modules/allometry/R/read.allom.data.R index 004fe88ebb1..8b48962a9dc 100644 --- a/modules/allometry/R/read.allom.data.R +++ b/modules/allometry/R/read.allom.data.R @@ -1,19 +1,13 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2015 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- -#' @title read.allom.data -#' @name read.allom.data +#' read.allom.data #' -#' @description Extracts PFT- and component-specific data and allometeric equations from the specified files. +#' Extracts PFT- and component-specific data and allometeric equations from the specified files. #' +#' This code also estimates the standard error from R-squared, +#' which is required to simulate pseudodata from the allometric eqns. 
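# --- illustrative aside, not part of the patch ---
# The recurring documentation change in this PR (including the \describe
# block just below) swaps Rd \itemize for \describe. In Rd markup, \itemize
# items take no label, so the old two-brace form "\item{n} {sample size}"
# rendered incorrectly; \describe is the construct for labelled
# term/definition pairs. The corrected roxygen pattern looks like:
#' \describe{
#'   \item{acronym}{USDA species acronyms, used with FIELD data (vector)}
#'   \item{spcd}{USFS species codes, use with TALLY data (vector)}
#' }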
+#' #' @param pft.data PFT dataframe -#' \itemize{ +#' \describe{ #' \item{acronym}{USDA species acronyms, used with FIELD data (vector)} #' \item{spcd}{USFS species codes, use with TALLY data (vector)} #' } @@ -23,8 +17,6 @@ #' @param nsim number of Monte Carlo draws in numerical transforms #' @return \item{field}{PFT-filtered field Data} #' \item{parm}{Component- and PFT-filtered Allometric Equations} -#' @details This code also estimates the standard error from R-squared, -#' which is required to simulate pseudodata from the allometric eqns. read.allom.data <- function(pft.data, component, field, parm, nsim = 10000) { allom <- list(parm = NULL, field = NULL) diff --git a/modules/allometry/R/version.R b/modules/allometry/R/version.R new file mode 100644 index 00000000000..0e58d885272 --- /dev/null +++ b/modules/allometry/R/version.R @@ -0,0 +1,3 @@ +# Set at package install time, used by pecan.all::pecan_version() +# to identify development versions of packages +.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown") diff --git a/modules/allometry/man/AllomAve.Rd b/modules/allometry/man/AllomAve.Rd index f3066a2f276..7d9fc911bab 100644 --- a/modules/allometry/man/AllomAve.Rd +++ b/modules/allometry/man/AllomAve.Rd @@ -20,12 +20,13 @@ AllomAve( \arguments{ \item{pfts}{pft list from PEcAn settings (if con) OR list of pft spcd's If the latter, the names within the list are used to identify PFTs -\itemize{ +\describe{ \item{'acronym'}{ - USDA species acronyms (see plants.usda.gov), used with FIELD data (vector)} \item{'spcd'}{ - USFS species codes, use with PARM data (vector)} }} -\item{components}{IDs for allometry components from Jenkins et al 2004 Table 5. Default is stem biomass (6). See data(allom.components)} +\item{components}{IDs for allometry components from Jenkins et al 2004 Table 5. +Default is stem biomass (6). See data(allom.components)} \item{outdir}{output directory files are written to. Default is getwd()} @@ -47,15 +48,16 @@ If the latter, the names within the list are used to identify PFTs nested list of parameter summary statistics } \description{ -allometery wrapper function that handles loading and subsetting the data, +Allometry wrapper function that handles loading and subsetting the data, fitting the Bayesian models, and generating diagnostic figures. Set up to loop over multiple PFTs and components. Writes raw MCMC and PDF of diagnositcs to file and returns table of summary stats. -} -\details{ + There are two usages of this function. -When running 'online' (connected to the PEcAn database), pass the database connection, con, and the pfts subsection of the PEcAn settings. -When running 'stand alone' pass the pft list mapping species to species codes and the file paths to the allometry table and field data (optional) +When running 'online' (connected to the PEcAn database), pass the database connection, + con, and the pfts subsection of the PEcAn settings. +When running 'stand alone' pass the pft list mapping species to species codes + and the file paths to the allometry table and field data (optional) } \examples{ diff --git a/modules/allometry/man/allom.BayesFit.Rd b/modules/allometry/man/allom.BayesFit.Rd index 66bc5933740..7baa69ba2af 100644 --- a/modules/allometry/man/allom.BayesFit.Rd +++ b/modules/allometry/man/allom.BayesFit.Rd @@ -12,21 +12,21 @@ allom.BayesFit(allom, nrep = 10000, form = "power", dmin = 0.1, dmax = 500) 'field' - contains a list, each entry for which is a data frame with 'x' and 'y'. 
Can be NULL 'parm' - a single data frame with the following components: - \itemize{ - \item{n} {sample size} - \item{a} {eqn coefficient} - \item{b} {eqn coefficient} - \item{c} {eqn coefficient} - \item{d} {eqn coefficient} - \item{e} {eqn coefficient} - \item{se} {standard error} - \item{eqn} {sample size} - \item{Xmin} {smallest tree sampled (cm)} - \item{Xmax} {largest tree sampled (cm)} - \item{Xcor} {units correction on X} - \item{Ycor} {units correction on Y} - \item{Xtype} {type of measurement on the X} - \item{spp} { - USFS species code} + \describe{ + \item{n}{sample size} + \item{a}{eqn coefficient} + \item{b}{eqn coefficient} + \item{c}{eqn coefficient} + \item{d}{eqn coefficient} + \item{e}{eqn coefficient} + \item{se}{standard error} + \item{eqn}{sample size} + \item{Xmin}{smallest tree sampled (cm)} + \item{Xmax}{largest tree sampled (cm)} + \item{Xcor}{units correction on X} + \item{Ycor}{units correction on Y} + \item{Xtype}{type of measurement on the X} + \item{spp}{ - USFS species code} }} \item{nrep}{- number of MCMC replicates} diff --git a/modules/allometry/man/allom.predict.Rd b/modules/allometry/man/allom.predict.Rd index e48f5d21a51..031510f2504 100644 --- a/modules/allometry/man/allom.predict.Rd +++ b/modules/allometry/man/allom.predict.Rd @@ -17,7 +17,7 @@ allom.predict( } \arguments{ \item{object}{Allometry model object. Option includes -\itemize{ +\describe{ \item{'list of mcmc'}{ - mcmc outputs in a list by PFT then component} \item{'vector of file paths'}{ - path(s) to AllomAve RData files} \item{'directory where files are located}{ - } @@ -25,7 +25,8 @@ allom.predict( \item{dbh}{Diameter at Breast Height (cm)} -\item{pft}{Plant Functional Type. Needs to match the name used in AllomAve. Can be NULL if only one PFT/species exists, otherwise needs to the same length as dbh} +\item{pft}{Plant Functional Type. Needs to match the name used in AllomAve. +Can be NULL if only one PFT/species exists, otherwise needs to the same length as dbh} \item{component}{Which component to predict. Can be NULL if only one component was analysed in AllomAve.} @@ -34,6 +35,9 @@ allom.predict( \item{use}{c('Bg','mu','best')} \item{interval}{c('none','confidence','prediction') default is prediction} + +\item{single.tree}{logical: Is this a DBH time series from one individual tree? +If TRUE, will use a fixed error for all draws.} } \value{ matrix of Monte Carlo predictions that has n rows and one column per DBH diff --git a/modules/allometry/man/load.allom.Rd b/modules/allometry/man/load.allom.Rd index 23389644719..4b9f0415485 100644 --- a/modules/allometry/man/load.allom.Rd +++ b/modules/allometry/man/load.allom.Rd @@ -8,7 +8,7 @@ load.allom(object) } \arguments{ \item{object}{Allometry model object. 
Option includes -\itemize{ +\describe{ \item{'vector of file paths'}{ - path(s) to AllomAve RData files} \item{'directory where files are located}{ - } }} diff --git a/modules/allometry/man/query.allom.data.Rd b/modules/allometry/man/query.allom.data.Rd index 7185ba0b56e..dfdf8591b1c 100644 --- a/modules/allometry/man/query.allom.data.Rd +++ b/modules/allometry/man/query.allom.data.Rd @@ -18,11 +18,11 @@ query.allom.data(pft_name, variable, con, nsim = 10000) \description{ Module to grab allometric information from the raw data table Will grab both original field data and tallied equations - -Tallied equation format based on Jenkins et al 2004 USFS -General Technical Report NE-319 } \details{ +Tallied equation format based on Jenkins et al 2004 USFS +General Technical Report NE-319 + database is assumed to conform to the PEcAn Schema } \author{ diff --git a/modules/allometry/man/read.allom.data.Rd b/modules/allometry/man/read.allom.data.Rd index e697399a122..cffd3a490ff 100644 --- a/modules/allometry/man/read.allom.data.Rd +++ b/modules/allometry/man/read.allom.data.Rd @@ -8,7 +8,7 @@ read.allom.data(pft.data, component, field, parm, nsim = 10000) } \arguments{ \item{pft.data}{PFT dataframe -\itemize{ +\describe{ \item{acronym}{USDA species acronyms, used with FIELD data (vector)} \item{spcd}{USFS species codes, use with TALLY data (vector)} }} diff --git a/modules/allometry/tests/testthat.R b/modules/allometry/tests/testthat.R index a775e051786..4194358dc88 100644 --- a/modules/allometry/tests/testthat.R +++ b/modules/allometry/tests/testthat.R @@ -1,11 +1,3 @@ -#------------------------------------------------------------------------------- -# Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the -# University of Illinois/NCSA Open Source License -# which accompanies this distribution, and is available at -# http://opensource.ncsa.illinois.edu/license.html -#------------------------------------------------------------------------------- library(testthat) library(PEcAn.utils) diff --git a/modules/allometry/vignettes/AllomVignette.Rmd b/modules/allometry/vignettes/AllomVignette.Rmd index 111a4d982df..2add42100b5 100644 --- a/modules/allometry/vignettes/AllomVignette.Rmd +++ b/modules/allometry/vignettes/AllomVignette.Rmd @@ -2,7 +2,7 @@ title: "PEcAn.allometry Vignette" author: "Mike Dietze" date: "April 23, 2015" -output: html_document +output: html_vignette vignette: > %\VignetteIndexEntry{PEcAn.allometry Vignette} %\VignetteEngine{knitr::rmarkdown} diff --git a/modules/assim.batch/DESCRIPTION b/modules/assim.batch/DESCRIPTION index 1e8f9787d2d..6283791ace5 100644 --- a/modules/assim.batch/DESCRIPTION +++ b/modules/assim.batch/DESCRIPTION @@ -1,8 +1,7 @@ Package: PEcAn.assim.batch Type: Package Title: PEcAn Functions Used for Ecological Forecasts and Reanalysis -Version: 1.7.2.9000 -Date: 2021-10-04 +Version: 1.8.0.9000 Authors@R: c(person("Mike", "Dietze", role = c("aut"), email = "dietze@bu.edu"), person("Istem", "Fer", role = c("aut", "cre"), @@ -15,7 +14,7 @@ Description: The Predictive Ecosystem Carbon Analyzer (PEcAn) is a scientific model parameterization, execution, and analysis. The goal of PECAn is to streamline the interaction between data and models, and to improve the efficacy of scientific investigation. 
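# --- illustrative aside, not part of the patch ---
# A compact usage sketch tying together the allometry API documented above
# (AllomAve -> load.allom -> allom.predict), patterned on the package
# vignette; the FAGR species code and tempdir() paths are assumptions for
# illustration only:
library(PEcAn.allometry)
pfts <- list(FAGR = data.frame(spcd = 531, acronym = "FAGR"))
allom.stats <- AllomAve(pfts, components = 6, outdir = tempdir(), ngibbs = 500)
allom.fit <- load.allom(tempdir())  # reads the AllomAve RData files back in
pred <- allom.predict(allom.fit, dbh = 10:50, pft = "FAGR",
                      component = 6, use = "Bg", interval = "prediction")
PI <- apply(pred, 2, stats::quantile, c(0.025, 0.5, 0.975))  # prediction interval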
-VignetteBuilder: knitr +VignetteBuilder: knitr, rmarkdown Imports: abind, BayesianTools, @@ -23,7 +22,6 @@ Imports: MASS, methods, mlegp, - dplyr, ellipse, graphics, grDevices, @@ -51,13 +49,13 @@ Imports: lqmm, mvtnorm Suggests: - knitr, - rmarkdown, + knitr (>= 1.42), + rmarkdown (>= 2.19), testthat (>= 1.0.2) License: BSD_3_clause + file LICENSE Copyright: Authors LazyLoad: yes LazyData: FALSE Encoding: UTF-8 -RoxygenNote: 7.2.3 +RoxygenNote: 7.3.2 Roxygen: list(markdown = TRUE) diff --git a/modules/assim.batch/LICENSE b/modules/assim.batch/LICENSE index 9e38c2dc685..09ef35a60b4 100644 --- a/modules/assim.batch/LICENSE +++ b/modules/assim.batch/LICENSE @@ -1,29 +1,3 @@ -University of Illinois/NCSA Open Source License - -Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal with the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -- Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. -- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. -- Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. - +YEAR: 2024 +COPYRIGHT HOLDER: PEcAn Project +ORGANIZATION: PEcAn Project, authors affiliations diff --git a/modules/assim.batch/NAMESPACE b/modules/assim.batch/NAMESPACE index ef3ad771c40..455a294cdad 100644 --- a/modules/assim.batch/NAMESPACE +++ b/modules/assim.batch/NAMESPACE @@ -34,6 +34,7 @@ export(pda.define.llik.fn) export(pda.define.prior.fn) export(pda.emulator) export(pda.emulator.ms) +export(pda.generate.externals) export(pda.generate.knots) export(pda.generate.sf) export(pda.get.model.output) diff --git a/modules/assim.batch/NEWS.md b/modules/assim.batch/NEWS.md index b8719ba6ad6..2e34eecf11e 100644 --- a/modules/assim.batch/NEWS.md +++ b/modules/assim.batch/NEWS.md @@ -1,4 +1,10 @@ -# PEcAn.assim.batch 1.7.2.9000 +# PEcAn.assim.batch 1.8.0.9000 + +## License change +* PEcAn.assim.batch is now distributed under the BSD three-clause license instead of the NCSA Open Source license. + + +# PEcAn.assim.batch 1.8.0 ## Breaking changes * In functions `gpeval` and `mcmc.GP`, parameter `splinefcns` has been renamed to `splinefuns` to match the spelling in `minimize.GP`, thereby also fixing several cases where the wrong name was passed between functions. 
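# --- illustrative aside, not part of the patch ---
# The refactors below replace bare load() calls, which dumped objects into
# the function environment and triggered "no visible binding for global
# variable" NOTEs in R CMD check, with loads into a scratch environment plus
# explicit assignment. A minimal sketch of the pattern (file name is a
# placeholder); note that load()'s envir argument must be an environment
# object, not a quoted name:
samples <- new.env()
load("samples.Rdata", envir = samples)  # not envir = "samples"
ensemble.samples <- samples$ensemble.samples
sa.samples <- samples$sa.samples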
diff --git a/modules/assim.batch/R/autoburnin.R b/modules/assim.batch/R/autoburnin.R index 7450de3658f..c369c6bb761 100644 --- a/modules/assim.batch/R/autoburnin.R +++ b/modules/assim.batch/R/autoburnin.R @@ -74,7 +74,6 @@ getBurnin <- function(jags_out, #' samples (as list). Default = FALSE. #' @param ... Additional arguments for \code{getBurnin}, \code{gelman_diag_mw}, #' and \code{gelman.diag}. -#' @inheritParams getBurnin #' @examples #' z1 <- coda::mcmc(c(rnorm(2500, 5), rnorm(2500, 0))) #' z2 <- coda::mcmc(c(rnorm(2500, -5), rnorm(2500, 0))) diff --git a/modules/assim.batch/R/get.da.data.R b/modules/assim.batch/R/get.da.data.R index b6cff004b58..16997b7b4ca 100644 --- a/modules/assim.batch/R/get.da.data.R +++ b/modules/assim.batch/R/get.da.data.R @@ -41,7 +41,15 @@ calculate.nee.L <- function(yeardoytime, model.i.nee, observed.flux, be, bu) { get.da.data <- function(out.dir, ameriflux.dir, years, be, bu, ensemble.size = 199) { - load(paste(out.dir, "samples.Rdata", sep = "")) + samples.file <- paste(out.dir, "samples.Rdata" , sep = "") + if(file.exists(samples.file)) { + samples <- new.env() + load(samples.file, envir = samples) + ensemble.samples <- samples$ensemble.samples + sa.samples <- samples$sa.samples + } else { + PEcAn.logger::logger.error(samples.file, "not found, this file is required by the get.da.data function") + } pfts <- names(ensemble.samples) pfts <- pfts[pfts != "env"] diff --git a/modules/assim.batch/R/get.da.data.growth.R b/modules/assim.batch/R/get.da.data.growth.R index 4aca974fcf4..37737b37bd7 100644 --- a/modules/assim.batch/R/get.da.data.growth.R +++ b/modules/assim.batch/R/get.da.data.growth.R @@ -80,7 +80,16 @@ get.da.data.growth <- function() { growth <- cbind(buds[, c("plot", "individual", "pft")], growth) ensemble.size <- 500 - load(paste(out.dir, "samples.Rdata", sep = "")) + samples.file <- paste(out.dir, "samples.Rdata" , sep = "") + + if(file.exists(samples.file)) { + samples <- new.env() + load(samples.file, envir = samples) + ensemble.samples <- samples$ensemble.samples + sa.samples <- samples$sa.samples + } else { + PEcAn.logger::logger.error(samples.file, "not found, this file is required by the get.da.data function") + } pfts <- names(ensemble.samples) pfts <- pfts[pfts != "env"] diff --git a/modules/assim.batch/R/pda.generate.externals.R b/modules/assim.batch/R/pda.generate.externals.R index a4c3bf49dbd..b20633eedf1 100644 --- a/modules/assim.batch/R/pda.generate.externals.R +++ b/modules/assim.batch/R/pda.generate.externals.R @@ -53,11 +53,14 @@ ##' e.g. 
ind.list <- list(temperate.deciduous = c(2), temperate.conifer = c(1,2)) ##' @param nknots number of knots you want to train the emulator on ##' @export -##' example -##' -##' pda.externals <- pda.generate.externals(external.data = TRUE, obs = obs, varn = "NEE", varid = 297, n_eff = 106.9386, external.formats = TRUE, -##' model_data_diag = TRUE, model.out = "/data/workflows/PEcAn_15000000111/out/15000186876", -##' start_date = "2017-01-01", end_date = "2018-12-31") +##' @examples +##' \dontrun{ +##' pda.externals <- pda.generate.externals(external.data = TRUE, obs = obs, +##' varn = "NEE", varid = 297, n_eff = 106.9386, +##' external.formats = TRUE, model_data_diag = TRUE, +##' model.out = "/tmp/out/outdir", +##' start_date = "2017-01-01", end_date = "2018-12-31") +##' } pda.generate.externals <- function(external.data = FALSE, obs = NULL, varn = NULL, varid = NULL, n_eff = NULL, align_method = "match_timestep", par = NULL, model_data_diag = FALSE, model.out = NULL, start_date = NULL, end_date = NULL, @@ -156,8 +159,8 @@ pda.generate.externals <- function(external.data = FALSE, obs = NULL, varn = } pda.externals$external.knots <- external.knots - - + + ##################### model & data alignment diagnostics ##################### if(model_data_diag){ if(is.null(obs) & is.null(model.out) & is.null(varn) & is.null(start_date) & is.null(end_date)){ @@ -186,4 +189,4 @@ pda.generate.externals <- function(external.data = FALSE, obs = NULL, varn = pda.externals$model_data_diag <- model_data_diag return(pda.externals) -} +} \ No newline at end of file diff --git a/modules/assim.batch/R/pda.utils.R b/modules/assim.batch/R/pda.utils.R index 158cf0a1e12..bf619061aee 100644 --- a/modules/assim.batch/R/pda.utils.R +++ b/modules/assim.batch/R/pda.utils.R @@ -297,6 +297,7 @@ pda.load.priors <- function(settings, con, extension.check = FALSE) { if (length(pid) == 0) { pid <- grep("prior.distns.Rdata", files$file_name) ## is there a prior file? + } if (length(pid) > 0) { @@ -322,7 +323,11 @@ pda.load.priors <- function(settings, con, extension.check = FALSE) { # make sure there are no left over distributions in the environment suppressWarnings(rm(post.distns, prior.distns)) - load(prior.paths[[i]]) + distns <- new.env() + load(prior.paths[[i]], envir = distns) + prior.distns <- distns$prior.distns + post.distns <- distns$post.distns + if (!exists("post.distns")) { prior.out[[i]] <- prior.distns } else { diff --git a/modules/assim.batch/R/plot.da.R b/modules/assim.batch/R/plot.da.R index d83e988e436..3e096f65239 100644 --- a/modules/assim.batch/R/plot.da.R +++ b/modules/assim.batch/R/plot.da.R @@ -2,7 +2,7 @@ ## ported by M. 
Dietze 08/30/12 ## some of this is redundant with other parts of PEcAn and needs to be cleaned up -plot.da <- function(prior.dir, prior.file, in.dir, out.dir, next.run.dir) { +plot_da <- function(prior.dir, prior.file, in.dir, out.dir, next.run.dir) { # source('code/R/approx.posterior.R') source('code/R/utils.R') @@ -23,8 +23,27 @@ plot.da <- function(prior.dir, prior.file, in.dir, out.dir, next.run.dir) { num.run.ids <- 5 #commandArgs(trailingOnly = TRUE) print(num.run.ids) - load(paste(in.dir, "samples.Rdata", sep = "")) - load(paste(in.dir, "L.nee.Rdata", sep = "")) + samples.file <- paste(in.dir, "samples.Rdata", sep = "") + L.nee.file <- paste(in.dir, "L.nee.Rdata", sep = "") + + if(file.exists(samples.file)) { + samples <- new.env() + load(samples.file, envir = samples) + ensemble.samples <- samples$ensemble.samples + sa.samples <- samples$sa.samples + } else { + PEcAn.logger::logger.error(samples.file, "not found, this file is required by the plot_da function") + } + + if(file.exists(L.nee.file)) { + L.nee <- new.env() + load(L.nee.file, envir = L.nee) + x <- L.nee$x + y <- L.nee$y + } else { + PEcAn.logger::logger.error(L.nee.file, "not found, this file is required by the plot_da function") + } + prior.x <- x prior.y <- y @@ -90,7 +109,16 @@ plot.da <- function(prior.dir, prior.file, in.dir, out.dir, next.run.dir) { samp <- lapply(seq(num.run.ids), function(run.id) { print(paste0(in.dir, "./mcmc", run.id, ".Rdata")) - load(paste0(in.dir, "./mcmc", run.id, ".Rdata")) + run.id.file <- paste0(in.dir, "./mcmc", run.id, ".Rdata") + + if(file.exists(run.id.file)) { + run.env <- new.env() + load(run.id.file, envir = run.env) + m <- run.env$m + } else { + PEcAn.logger::logger.error(run.id.file, "not found, this file is required by the plot_da function") + } + return(m) }) @@ -182,4 +210,4 @@ plot.da <- function(prior.dir, prior.file, in.dir, out.dir, next.run.dir) { graphics::par(mfrow = c(1, 1), cex = 0.5) # graphics::plot(foo[,6] ~ as.factor(rownames(priors))) -} # plot.da +} # plot_da diff --git a/modules/assim.batch/R/version.R b/modules/assim.batch/R/version.R new file mode 100644 index 00000000000..0e58d885272 --- /dev/null +++ b/modules/assim.batch/R/version.R @@ -0,0 +1,3 @@ +# Set at package install time, used by pecan.all::pecan_version() +# to identify development versions of packages +.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown") diff --git a/modules/assim.batch/man/pda.generate.externals.Rd b/modules/assim.batch/man/pda.generate.externals.Rd index df965607b5d..fc5b3ce409c 100644 --- a/modules/assim.batch/man/pda.generate.externals.Rd +++ b/modules/assim.batch/man/pda.generate.externals.Rd @@ -101,3 +101,12 @@ e.g. 
ind.list <- list(temperate.deciduous = c(2), temperate.conifer = c(1,2))} This is a helper function for preparing PDA external objects, but it doesn't cover all the cases yet, use it with care You can use this function just to generate either one of the external.* PDA objects, but note that some args cannot be blank depending on what you aim to generate } +\examples{ +\dontrun{ +pda.externals <- pda.generate.externals(external.data = TRUE, obs = obs, +varn = "NEE", varid = 297, n_eff = 106.9386, +external.formats = TRUE, model_data_diag = TRUE, +model.out = "/tmp/out/outdir", +start_date = "2017-01-01", end_date = "2018-12-31") +} +} diff --git a/modules/assim.batch/tests/Rcheck_reference.log b/modules/assim.batch/tests/Rcheck_reference.log index 4dbe004041b..ceaf7cc3ef1 100644 --- a/modules/assim.batch/tests/Rcheck_reference.log +++ b/modules/assim.batch/tests/Rcheck_reference.log @@ -1,64 +1,15 @@ -* using log directory ‘/home/tanishq010/pecan/modules/PEcAn.assim.batch.Rcheck’ -* using R version 4.2.1 (2022-06-23) +* using log directory ‘/tmp/Rtmp0m7lr6/PEcAn.assim.batch.Rcheck’ +* using R version 4.1.3 (2022-03-10) * using platform: x86_64-pc-linux-gnu (64-bit) * using session charset: UTF-8 * using options ‘--no-manual --as-cran’ * checking for file ‘PEcAn.assim.batch/DESCRIPTION’ ... OK * checking extension type ... Package -* this is package ‘PEcAn.assim.batch’ version ‘1.7.2’ +* this is package ‘PEcAn.assim.batch’ version ‘1.7.2.9000’ * package encoding: UTF-8 -* checking CRAN incoming feasibility ... WARNING -Maintainer: ‘Istem Fer ’ - -New submission - -License components with restrictions and base license permitting such: - BSD_3_clause + file LICENSE -File 'LICENSE': - University of Illinois/NCSA Open Source License - - Copyright (c) 2012, University of Illinois, NCSA. All rights reserved. - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal with the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimers. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. - - Neither the names of University of Illinois, NCSA, nor the names - of its contributors may be used to endorse or promote products - derived from this Software without specific prior written permission. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR - ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. 
- -Strong dependencies not in mainstream repositories: - PEcAn.benchmark, PEcAn.DB, PEcAn.emulator, PEcAn.logger, PEcAn.MA, - PEcAn.remote, PEcAn.settings, PEcAn.uncertainty, PEcAn.utils, - PEcAn.workflow - -The Date field is over a month old. * checking package namespace information ... OK -* checking package dependencies ... WARNING -Imports includes 27 non-default packages. -Importing from so many packages makes the package vulnerable to any of -them becoming unavailable. Move as many as possible to Suggests and -use conditionally. - * checking package dependencies ... NOTE -Imports includes 27 non-default packages. +Imports includes 26 non-default packages. Importing from so many packages makes the package vulnerable to any of them becoming unavailable. Move as many as possible to Suggests and use conditionally. @@ -87,37 +38,17 @@ use conditionally. * checking whether the namespace can be loaded with stated dependencies ... OK * checking whether the namespace can be unloaded cleanly ... OK * checking loading without being on the library search path ... OK -* checking use of S3 registration ... OK -* checking dependencies in R code ... NOTE -Namespace in Imports field not imported from: ‘dplyr’ - All declared Imports should be used. +* checking dependencies in R code ... OK * checking S3 generic/method consistency ... OK * checking replacement functions ... OK * checking foreign function calls ... OK * checking R code for possible problems ... NOTE -get.da.data: no visible binding for global variable ‘ensemble.samples’ -get.da.data : : no visible binding for global variable - ‘sa.samples’ -get.da.data: no visible binding for global variable ‘sa.samples’ get.da.data : : no visible global function definition for ‘read.output.type’ -get.da.data.growth: no visible binding for global variable - ‘ensemble.samples’ -get.da.data.growth: no visible binding for global variable ‘sa.samples’ get.da.data.growth : : no visible global function definition for ‘read.output.type’ -pda.load.priors: no visible binding for global variable ‘post.distns’ -pda.load.priors: no visible binding for global variable ‘prior.distns’ -plot.da: no visible binding for global variable ‘y’ -plot.da: no visible binding for global variable ‘ensemble.samples’ -plot.da : : no visible binding for global variable - ‘ensemble.samples’ -plot.da : : no visible binding for global variable - ‘sa.samples’ -plot.da : : no visible binding for global variable ‘m’ Undefined global functions or variables: - ensemble.samples m post.distns prior.distns read.output.type - sa.samples y + read.output.type Found the following assignments to the global environment: File ‘PEcAn.assim.batch/R/pda.get.model.output.R’: @@ -145,4 +76,9 @@ File ‘PEcAn.assim.batch/R/pda.get.model.output.R’: * checking for detritus in the temp directory ... OK * DONE -Status: 2 WARNING, 3 NOTEs +Status: 1 WARNING, 2 NOTEs +See + ‘/tmp/Rtmp0m7lr6/PEcAn.assim.batch.Rcheck/00check.log’ +for details. 
+ + diff --git a/modules/assim.sequential/DESCRIPTION b/modules/assim.sequential/DESCRIPTION index 7543e985e1d..dfa454384cd 100644 --- a/modules/assim.sequential/DESCRIPTION +++ b/modules/assim.sequential/DESCRIPTION @@ -1,8 +1,7 @@ Package: PEcAnAssimSequential Type: Package Title: PEcAn Functions Used for Ecological Forecasts and Reanalysis -Version: 1.7.2 -Date: 2021-10-04 +Version: 1.8.0.9000 Author: Mike Dietze Maintainer: Mike Dietze Description: The Predictive Ecosystem Carbon Analyzer (PEcAn) is a scientific @@ -33,22 +32,30 @@ Imports: stringr Suggests: corrplot, - DBI, + exactextractr, ggrepel, emdbook, glue, + ggpubr, gridExtra, magic (>= 1.5.0), methods, PEcAn.benchmark, + PEcAn.data.land, PEcAn.data.remote, + PEcAn.utils, + PEcAn.visualization, plotrix, plyr (>= 1.8.4), + randomForest, + keras3 (>= 1.0.0), raster, + readr, reshape2 (>= 1.4.2), rlist, sf, stats, + terra, testthat, tictoc, tidyr, @@ -58,4 +65,4 @@ Suggests: License: BSD_3_clause + file LICENSE Copyright: Authors Encoding: UTF-8 -RoxygenNote: 7.2.3 +RoxygenNote: 7.3.2 diff --git a/modules/assim.sequential/NAMESPACE b/modules/assim.sequential/NAMESPACE index 3c571d718c2..db21f07876e 100644 --- a/modules/assim.sequential/NAMESPACE +++ b/modules/assim.sequential/NAMESPACE @@ -11,26 +11,31 @@ export(EnKF.MultiSite) export(GEF) export(GEF.MultiSite) export(GEF.MultiSite.Nimble) +export(GrabFillMatrix) export(Local.support) export(Obs.data.prepare.MultiSite) export(Prep_OBS_SDA) export(Remote_Sync_launcher) export(SDA_OBS_Assembler) export(SDA_control) +export(SDA_downscale_hrly) export(SDA_remote_launcher) +export(SDA_timeseries_plot) export(adj.ens) +export(aggregate) export(alltocs) export(alr) export(assessParams) export(block_matrix) export(conj_wt_wishart_sampler) +export(construct_nimble_H) export(dwtmnorm) -export(generate_colors_sda) export(get_ensemble_weights) export(hop_test) export(interactive.plotting.sda) export(inv.alr) export(load_data_paleon_sda) +export(matrix_network) export(metSplit) export(obs_timestep2timepoint) export(outlier.detector.boxplot) @@ -52,10 +57,12 @@ export(sda_weights_site) export(simple.local) export(tobit.model) export(tobit2space.model) +export(tobit_model_censored) export(y_star_create) import(furrr) import(lubridate) import(nimble) +importFrom(dplyr,"%>%") importFrom(lubridate,"%m+%") importFrom(magrittr,"%>%") importFrom(rlang,.data) diff --git a/modules/assim.sequential/R/Adjustment.R b/modules/assim.sequential/R/Adjustment.R index e1e061dbc9e..93c97228c4c 100644 --- a/modules/assim.sequential/R/Adjustment.R +++ b/modules/assim.sequential/R/Adjustment.R @@ -44,10 +44,14 @@ adj.ens<-function(Pf, X, mu.f, mu.a, Pa){ X_a[i,] <- V_a %*%diag(sqrt(L_a))%*%Z[i,] + mu.a } - - if(sum(mu.a - colMeans(X_a)) > 1 | sum(mu.a - colMeans(X_a)) < -1) logger.warn('Problem with ensemble adjustment (1)') - if(sum(diag(Pa) - diag(cov(X_a))) > 5 | sum(diag(Pa) - diag(cov(X_a))) < -5) logger.warn('Problem with ensemble adjustment (2)') - + if (sum(mu.a - colMeans(X_a)) > 1 + || sum(mu.a - colMeans(X_a)) < -1) { + PEcAn.logger::logger.warn('Problem with ensemble adjustment (1)') + } + if (sum(diag(Pa) - diag(stats::cov(X_a))) > 5 + || sum(diag(Pa) - diag(stats::cov(X_a))) < -5) { + PEcAn.logger::logger.warn('Problem with ensemble adjustment (2)') + } analysis <- as.data.frame(X_a) return(analysis) diff --git a/modules/assim.sequential/R/Analysis_sda.R b/modules/assim.sequential/R/Analysis_sda.R index 5c209b4d1d7..e2af6bbfd0f 100644 --- a/modules/assim.sequential/R/Analysis_sda.R +++ 
b/modules/assim.sequential/R/Analysis_sda.R
@@ -98,6 +98,7 @@ EnKF<-function(settings, Forecast, Observed, H, extraArg=NULL, ...){
 ##' @param settings  pecan standard settings list.
 ##' @param Forecast A list containing the forecasts variables including Q (process variance) and X (a dataframe of forecast state variables for different ensemble)
 ##' @param Observed A list containing the observed variables including R (cov of observed state variables) and Y (vector of estimated mean of observed state variables)
+##' @param H not used
 ##' @param extraArg This argument is a list containing aqq, bqq and t. The aqq and bqq are shape parameters estimated over time for the process covariance and t gives the time in terms of index of obs.list. See Details.
 ##' @param nitr Number of iterations to run each MCMC chain.
 ##' @param nburnin Number of initial, pre-thinning, MCMC iterations to discard.
diff --git a/modules/assim.sequential/R/Analysis_sda_block.R b/modules/assim.sequential/R/Analysis_sda_block.R
new file mode 100644
index 00000000000..6c37b4f251f
--- /dev/null
+++ b/modules/assim.sequential/R/Analysis_sda_block.R
@@ -0,0 +1,635 @@
+##' @title analysis_sda_block
+##' @name analysis_sda_block
+##' @author Dongchen Zhang
+##'
+##' @param settings pecan standard multi-site settings list.
+##' @param block.list.all Lists of forecast and analysis outputs for each time point of each block. If t=1, we initialize those outputs of each block with NULL from the `sda.enkf.multisite` function.
+##' @param X A matrix containing ensemble forecasts with the dimensions of `[ensemble number, site number * number of state variables]`. The columns are matched with the site.ids and state variable names inside the `FORECAST` object in the `sda.enkf.multisite` script.
+##' @param obs.mean Lists of date times named by time points, which contain lists of sites named by site ids, which contain observation means for each state variable of each site for each time point.
+##' @param obs.cov Lists of date times named by time points, which contain lists of sites named by site ids, which contain observation covariances for all state variables of each site for each time point.
+##' @param t time point in format of YYYY-MM-DD.
+##' @param nt total length of time steps, corresponding to the `nt` variable in the `sda.enkf.multisite` function.
+##' @param MCMC.args arguments for the MCMC sampling; details can be found in the roxygen structure for the control list in the `sda.enkf.multisite` function.
+##' @param block.list.all.pre pre-existing block.list.all object for passing the aqq and bqq to the current SDA run, the default is NULL. Details can be found in the roxygen structure for `pre_enkf_params` of the `sda.enkf.multisite` function
+##' @details This function builds the blocks (via `build.block.xy`), updates the process-error priors, initializes the samplers, and runs the block-based MCMC.
+##'
+##' @description This function provides the block-based MCMC sampling approach.
+##'
+##' @return It returns a list containing the updated `block.list.all` object and the analysis results (`mu.f`, `Pf`, `mu.a`, `Pa`, `Y`, `R`).
+##' @importFrom dplyr %>%
+analysis_sda_block <- function (settings, block.list.all, X, obs.mean, obs.cov, t, nt, MCMC.args, block.list.all.pre = NULL) {
+  #convert from vector values to block lists.
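+  #(each block groups sites that interact under the localization cutoff; H, Y,
+  # and R are the observation index, observation mean vector, and observation
+  # error covariance for this time point)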
+ if ("try-error" %in% class(try(block.results <- build.block.xy(settings = settings, + block.list.all = block.list.all, + X = X, + obs.mean = obs.mean, + obs.cov = obs.cov, + t = t)))) { + PEcAn.logger::logger.severe("Something wrong within the build.block.xy function.") + return(0) + } + #grab block.list and H from the results. + block.list.all <- block.results[[1]] + H <- block.results[[2]] + Y <- block.results[[3]] + R <- block.results[[4]] + + #update q. + if ("try-error" %in% class(try(block.list.all <- update_q(block.list.all, t, nt, aqq.Init = as.numeric(settings$state.data.assimilation$aqq.Init), + bqq.Init = as.numeric(settings$state.data.assimilation$bqq.Init), + MCMC_dat = NULL, + block.list.all.pre)))) { + PEcAn.logger::logger.severe("Something wrong within the update_q function.") + return(0) + } + + #add initial conditions for the MCMC sampling. + if ("try-error" %in% class(try(block.list.all[[t]] <- MCMC_Init(block.list.all[[t]], X)))) { + PEcAn.logger::logger.severe("Something wrong within the MCMC_Init function.") + return(0) + } + + #update MCMC args. + block.list.all[[t]] <- block.list.all[[t]] %>% + purrr::map(function(l){ + l$MCMC <- MCMC.args + l + }) + + #parallel for loop over each block. + PEcAn.logger::logger.info(paste0("Running MCMC ", "for ", length(block.list.all[[t]]), " blocks")) + if ("try-error" %in% class(try(block.list.all[[t]] <- furrr::future_map(block.list.all[[t]], MCMC_block_function, .progress = T)))) { + PEcAn.logger::logger.severe("Something wrong within the MCMC_block_function function.") + return(0) + } + PEcAn.logger::logger.info("Completed!") + + #convert from block lists to vector values. + if ("try-error" %in% class(try(V <- block.2.vector(block.list.all[[t]], X, H)))) { + PEcAn.logger::logger.severe("Something wrong within the block.2.vector function.") + return(0) + } + + #return values + return(list(block.list.all = block.list.all, + mu.f = V$mu.f, + Pf = V$Pf, + mu.a = V$mu.a, + Pa = V$Pa, + Y = Y, + R = R)) +} + +##' @title build.block.xy +##' @name build.block.xy +##' @author Dongchen Zhang +##' +##' @param settings pecan standard multi-site settings list. +##' @param block.list.all List contains nt empty sub-elements. +##' @param X A matrix contains ensemble forecasts. +##' @param obs.mean List of dataframe of observation means, named with observation datetime. +##' @param obs.cov List of covariance matrices of state variables , named with observation datetime. +##' @param t time point. +##' @details This function will add data and constants into each block that are needed for the MCMC sampling. +##' +##' @description This function split long vector and covariance matrix into blocks corresponding to the localization. +##' +##' @return It returns the `build.block.xy` object with data and constants filled in. +build.block.xy <- function(settings, block.list.all, X, obs.mean, obs.cov, t) { + #set q.type from settings. + if (settings$state.data.assimilation$q.type == "vector") { + q.type <- 3 + } else if (settings$state.data.assimilation$q.type == "wishart") { + q.type <- 4 + } + #grab basic arguments based on X. 
+  site.ids <- unique(attributes(X)$Site)
+  var.names <- unique(attributes(X)$dimnames[[2]])
+  mu.f <- colMeans(X)
+  Pf <- stats::cov(X)
+  if (length(diag(Pf)[which(diag(Pf)==0)]) > 0) {
+    diag(Pf)[which(diag(Pf)==0)] <- min(diag(Pf)[which(diag(Pf) != 0)])/5 #fixing det(Pf)==0
+    PEcAn.logger::logger.warn("The zero variances in Pf are being replaced by one fifth of the minimum variance in that matrix.")
+  }
+  #distance calculations and localization
+  site.locs <- settings$run %>%
+    purrr::map('site') %>%
+    purrr::map_dfr(~c(.x[['lon']],.x[['lat']]) %>% as.numeric)%>%
+    t %>%
+    `colnames<-`(c("Lon","Lat")) %>%
+    `rownames<-`(site.ids)
+  #Finding the distance between the sites
+  dis.matrix <- sp::spDists(site.locs, longlat = TRUE)
+  if (!is.null(settings$state.data.assimilation$Localization.FUN)) {
+    Localization.FUN <- get(settings$state.data.assimilation$Localization.FUN)
+    #turn that into a blocked matrix format
+    blocked.dis <- block_matrix(dis.matrix %>% as.numeric(), rep(length(var.names), length(site.ids)))
+    Pf <- Localization.FUN(Pf, blocked.dis, settings$state.data.assimilation$scalef %>% as.numeric())
+  }
+  #Handle observation
+  #observation number per site
+  #free run special case.
+  if (is.null(obs.mean[[t]])) {
+    obs_per_site <- rep(0, length(site.ids)) %>% purrr::set_names(site.ids)
+  } else {
+    obs_per_site <- purrr::map_int(obs.mean[[t]], length)
+  }
+  #no observations at this time point is only valid for a free run.
+  if (!as.logical(settings$state.data.assimilation$free.run) && all(is.null(unlist(obs.mean[[t]])))) {
+    PEcAn.logger::logger.error("Please set the settings$state.data.assimilation$free.run as TRUE if you don't have any observations!")
+    return(0)
+  } else if (as.logical(settings$state.data.assimilation$free.run) | all(is.null(unlist(obs.mean[[t]])))) {
+    #free run, or no observations at the current time point.
+    H <- list(ind = seq_along(rep(var.names, length(site.ids))))
+    Y <- rep(NA, length(H$ind))
+    R <- diag(1, length(H$ind))
+  } else {
+    Obs.cons <- Construct.R(site.ids, var.names, obs.mean[[t]], obs.cov[[t]])
+    Y <- Obs.cons$Y
+    R <- Obs.cons$R
+    if (length(Y) > 1) {
+      if (length(diag(R)[which(diag(R)==0)]) > 0) {
+        diag(R)[which(diag(R)==0)] <- min(diag(R)[which(diag(R) != 0)])/2
+        PEcAn.logger::logger.warn("The zero variances in R are being replaced by half of the minimum variance in that matrix.")
+      }
+    }
+    #create a matrix that describes the support for each observed state variable at time t
+    min_max <- settings$state.data.assimilation$state.variables %>%
+      purrr::map(function(state.variable){
+        c(as.numeric(state.variable$min_value),
+          as.numeric(state.variable$max_value))
+      }) %>% unlist() %>% as.vector() %>%
+      matrix(length(settings$state.data.assimilation$state.variables), 2, byrow = TRUE) %>%
+      `rownames<-`(var.names)
+    #Create y.censored and y.ind
+    #describing if the obs are within the defined range.
+    y.ind <- y.censored <- c()
+    for (i in seq_along(Y)) {
+      if (Y[i] > min_max[names(Y[i]), 1]) {
+        y.ind[i] = 1; y.censored[i] = Y[i]
+      } else {y.ind[i] <- y.censored[i] <- 0}
+    }
+    #create H
+    # if there is any site that has zero observation.
+    if (any(obs_per_site == 0)) {
+      #name matching between observation names and state variable names.
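+      #(f.2.y.ind gives, for each observed variable, its position within a
+      # single site's set of state variables)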
+      f.2.y.ind <- obs.mean[[t]] %>%
+        purrr::map(\(x)which(var.names %in% names(x))) %>%
+        unlist %>%
+        unique
+      H <- list(ind = f.2.y.ind %>% purrr::map(function(start){
+        seq(start, length(site.ids) * length(var.names), length(var.names))
+      }) %>% unlist() %>% sort)
+    } else {
+      H <- construct_nimble_H(site.ids = site.ids,
+                              var.names = var.names,
+                              obs.t = obs.mean[[t]],
+                              pft.path = settings[[1]]$run$inputs$pft.site$path,
+                              by = "block_pft_var")
+    }
+  }
+  #start the blocking process
+  #should we consider interactions between sites?
+  if(as.numeric(settings$state.data.assimilation$scalef) == 0){
+    block.list <- vector("list", length(site.ids))
+    #loop over sites
+    for (i in seq_along(site.ids)) {
+      #store which block contains which sites.
+      block.list[[i]]$sites.per.block <- i
+      block.list[[i]]$site.ids <- site.ids[i]
+      block.list[[i]]$t <- t
+      #fill in mu.f and Pf
+      f.start <- (i - 1) * length(var.names) + 1
+      f.end <- i * length(var.names)
+      block.list[[i]]$data$muf <- mu.f[f.start:f.end]
+      block.list[[i]]$data$pf <- Pf[f.start:f.end, f.start:f.end]
+      #find indices for Y.
+      y.start <- sum(obs_per_site[1:i]) - obs_per_site[i] + 1
+      y.end <- sum(obs_per_site[1:i])
+      #fill in y and r
+      #if there is no observation for this site.
+      if (y.end < y.start) {
+        #if every site has zero observation/free run.
+        if (max(obs_per_site) == 0) {
+          block.list[[i]]$data$y.censored <- rep(NA, length(var.names))
+          block.list[[i]]$data$r <- diag(1, length(var.names))
+          block.h <- matrix(1, 1, length(var.names))
+        } else {
+          block.list[[i]]$data$y.censored <- rep(NA, max(obs_per_site))
+          block.list[[i]]$data$r <- diag(1, max(obs_per_site))
+          block.h <- matrix(1, 1, max(obs_per_site))
+        }
+      } else {
+        block.list[[i]]$data$y.censored <- y.censored[y.start:y.end]
+        block.list[[i]]$data$r <- solve(R[y.start:y.end, y.start:y.end])
+        block.h <- Construct.H.multisite(site.ids[i], var.names, obs.mean[[t]])
+      }
+      #fill in constants.
+      block.list[[i]]$H <- block.h
+      block.list[[i]]$constant$H <- which(apply(block.h, 2, sum) == 1)
+      block.list[[i]]$constant$N <- length(f.start:f.end)
+      block.list[[i]]$constant$YN <- length(y.start:y.end)
+      block.list[[i]]$constant$q.type <- q.type
+    }
+    names(block.list) <- site.ids
+  } else {
+    #find networks given TRUE/FALSE matrix representing sites' interactions.
+    block.vec <- matrix_network(dis.matrix <= as.numeric(settings$state.data.assimilation$scalef))
+    #check that the blocks returned by matrix_network cover every site exactly once.
+    if (block.vec %>%
+        purrr::map(function(l){length(l)}) %>%
+        unlist %>%
+        sum() != length(site.ids)) {
+      PEcAn.logger::logger.severe("Block calculation failed, please check the matrix_network function!")
+      return(0)
+    }
+    block.list <- vector("list", length(block.vec))
+    #loop over sites
+    for (i in seq_along(block.vec)) {#i is site index
+      #store which block contains which sites.
+      ids <- block.vec[[i]]
+      block.list[[i]]$sites.per.block <- ids
+      block.list[[i]]$site.ids <- site.ids[ids]
+      block.list[[i]]$t <- t
+      y.ind <- f.ind <- na.ind <- c()
+      r.block <- y.block <- c()
+      for (j in seq_along(ids)) {
+        f.start <- (ids[j] - 1) * length(var.names) + 1
+        f.end <- ids[j] * length(var.names)
+        y.start <- sum(obs_per_site[1:ids[j]]) - obs_per_site[ids[j]] + 1
+        y.end <- sum(obs_per_site[1:ids[j]])
+        f.ind <- c(f.ind, f.start:f.end)
+        #if the current site has at least one observation.
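+        #(y.end < y.start can only happen when obs_per_site[ids[j]] is zero)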
+        if (y.end >= y.start) {
+          # y.ind <- c(y.ind, y.start:y.end)
+          y.block <- c(y.block, y.censored[y.start:y.end])
+          r.block <- c(r.block, diag(R)[y.start:y.end])
+        } else {
+          #if the current site has zero observation.
+          #free-run case.
+          if (max(obs_per_site) == 0) {
+            y.block <- c(y.block, rep(NA, length(var.names)))
+            r.block <- c(r.block, rep(1, length(var.names)))
+          } else {
+            y.block <- c(y.block, rep(NA, max(obs_per_site)))
+            r.block <- c(r.block, rep(1, max(obs_per_site)))
+          }
+        }
+      }
+      #if we have NA for y, we will build H differently.
+      if (any(is.na(y.block))) {
+        block.h <- matrix(0, 1, length(ids)*length(var.names))
+        #free-run case.
+        if (is.null(obs.mean[[t]])) {
+          f.2.y.ind <- seq_along(var.names)
+        } else {
+          f.2.y.ind <- obs.mean[[t]] %>%
+            purrr::map(\(x)which(var.names %in% names(x))) %>%
+            unlist %>%
+            unique
+        }
+        seq.ind <- f.2.y.ind %>% purrr::map(function(start){
+          seq(start, dim(block.h)[2], length(var.names))
+        }) %>% unlist()
+        block.h[1, seq.ind] <- 1
+      } else {
+        block.h <- Construct.H.multisite(site.ids[ids], var.names, obs.mean[[t]])
+      }
+      #fill in mu.f and Pf
+      block.list[[i]]$data$muf <- mu.f[f.ind]
+      block.list[[i]]$data$pf <- GrabFillMatrix(Pf, f.ind)
+      #fill in y and R
+      block.list[[i]]$data$y.censored <- y.block
+      if (length(r.block) == 1) {
+        block.list[[i]]$data$r <- 1/r.block
+      } else {
+        block.list[[i]]$data$r <- solve(diag(r.block))
+      }
+      block.list[[i]]$H <- block.h
+      block.list[[i]]$constant$H <- which(apply(block.h, 2, sum) == 1)
+      block.list[[i]]$constant$N <- length(f.ind)
+      block.list[[i]]$constant$YN <- length(y.block)
+      block.list[[i]]$constant$q.type <- q.type
+    }
+  }
+  #if it's Wishart Q, we need to replace any NA Y with corresponding muf, and r with Pf.
+  #also, if length of observation is 1, the Wishart Q is not suitable for the MCMC.
+  #we will then need to change the Q type to 3, which is the vector Q.
+  if (q.type == 4) {
+    for (i in seq_along(block.list)) {
+      #check length.
+      if (block.list[[i]]$constant$YN == 1) {
+        block.list[[i]]$constant$q.type <- 3
+        next
+      }
+      # #check NAs.
+      # na.ind <- which(is.na(block.list[[i]]$data$y.censored))
+      # if (length(na.ind) > 0) {
+      #   block.list[[i]]$constant$YN <- block.list[[i]]$constant$YN - length(na.ind)
+      #   block.list[[i]]$constant$H <- block.list[[i]]$constant$H[-na.ind]
+      #   block.list[[i]]$data$y.censored <- block.list[[i]]$data$y.censored[-na.ind]
+      #   block.list[[i]]$data$r <- diag(diag(block.list[[i]]$data$r)[-na.ind])
+      # }
+      # na.site.ind <- which(obs_per_site[block.list[[i]]$site.ids] == 0)
+      # na.ind <- which(is.na(block.list[[i]]$data$y.censored))
+      # if (length(na.site.ind) > 0) {
+      #   site.inds <- block.list[[i]]$sites.per.block[na.site.ind]
+      #   y.2.muf.ind <- f.2.y.ind %>% purrr::map(function(start){
+      #     seq(start, length(mu.f), length(var.names))[site.inds]
+      #   }) %>% unlist() %>% sort()
+      #   block.list[[i]]$data$y.censored[na.ind] <- mu.f[y.2.muf.ind]
+      #   block.list[[i]]$data$r[na.ind, na.ind] <- Pf[y.2.muf.ind, y.2.muf.ind]
+      # }
+    }
+  }
+  #return values.
+  block.list.all[[t]] <- block.list
+  return(list(block.list.all = block.list.all, H = H, Y = Y, R = R))
+}
+
+##' @title MCMC_Init
+##' @name MCMC_Init
+##' @author Dongchen Zhang
+##'
+##' @param block.list lists of blocks generated by the `build.block.xy` function.
+##' @param X A matrix containing ensemble forecasts.
+##' @details This function helps create initial conditions for the MCMC sampling.
+##'
+##' @return It returns the `block.list` object with initial conditions filled in.
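+#(X.mod is initialized from the forecast mean, X and Xs from the censored
+# observations, and q from its gamma (vector q) or Wishart (matrix q) prior)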
+MCMC_Init <- function (block.list, X) {
+  var.names <- unique(attributes(X)$dimnames[[2]])
+  #take mu.f as the ensemble mean of X.
+  sample.mu.f <- colMeans(X)
+  for (i in seq_along(block.list)) {
+    #number of observations.
+    num.obs <- length(block.list[[i]]$data$y.censored)
+    #loop over each site within each block
+    for (j in seq_along(block.list[[i]]$sites.per.block)) {
+      #initialize mu.f
+      start <- (block.list[[i]]$sites.per.block[j] - 1) * length(var.names) + 1
+      end <- (block.list[[i]]$sites.per.block[j]) * length(var.names)
+      block.list[[i]]$Inits$X.mod <- c(block.list[[i]]$Inits$X.mod, sample.mu.f[start:end])
+      #initialize X
+      block.list[[i]]$Inits$X <- block.list[[i]]$data$y.censored
+      #initialize Xs
+      block.list[[i]]$Inits$Xs <- block.list[[i]]$Inits$X.mod[block.list[[i]]$constant$H]
+    }
+    #initialize q.
+    #if we want the vector q.
+    if (block.list[[i]]$constant$q.type == 3) {
+      for (j in seq_along(block.list[[i]]$data$y.censored)) {
+        block.list[[i]]$Inits$q <- c(block.list[[i]]$Inits$q, stats::rgamma(1, shape = block.list[[i]]$data$aq[j], rate = block.list[[i]]$data$bq[j]))
+      }
+    } else if (block.list[[i]]$constant$q.type == 4) {
+      #if we want the wishart Q.
+      if ("try-error" %in% class(try(block.list[[i]]$Inits$q <-
+                                     stats::rWishart(1, df = block.list[[i]]$data$bq, Sigma = block.list[[i]]$data$aq)[,,1], silent = TRUE))) {
+        block.list[[i]]$Inits$q <-
+          stats::rWishart(1, df = block.list[[i]]$data$bq, Sigma = stats::toeplitz((block.list[[i]]$constant$YN:1)/block.list[[i]]$constant$YN))[,,1]
+      }
+    }
+  }
+  #return values.
+  return(block.list)
+}
+
+##' @title MCMC_block_function
+##' @name MCMC_block_function
+##' @author Dongchen Zhang
+##'
+##' @param block each block within the `block.list` lists.
+##'
+##' @return It returns the `block` object with analysis results filled in.
+MCMC_block_function <- function(block) {
+  #build nimble model
+  #TODO: harmonize the MCMC code between block-based and general analysis functions to reduce the complexity of code.
+  model_pred <- nimble::nimbleModel(GEF.MultiSite.Nimble,
+                                    data = block$data,
+                                    inits = block$Inits,
+                                    constants = block$constant,
+                                    name = 'base')
+  #configure MCMC
+  conf <- nimble::configureMCMC(model_pred, print=FALSE)
+  conf$setMonitors(c("X", "X.mod", "q"))
+
+  #Handle samplers
+  #here we change the RW_block sampler to the ess sampler
+  #because it performs better for MVN sampling
+  samplerLists <- conf$getSamplers()
+  samplerNumberOffset <- length(samplerLists)
+  if (block$constant$q.type == 4) {
+    #if we have wishart q
+    #everything should be sampled with ess sampler.
+    samplerLists %>% purrr::map(function(l){l$setName("ess")})
+  }
+  conf$setSamplers(samplerLists)
+
+  #add Pf as propCov in the control list of the X.mod nodes.
+  X.mod.ind <- which(grepl("X.mod", samplerLists %>% purrr::map(~ .x$target) %>% unlist()))
+  conf$removeSampler(samplerLists[[X.mod.ind]]$target)
+  conf$addSampler(target = samplerLists[[X.mod.ind]]$target, type = "ess",
+                  control = list(propCov= block$data$pf, adaptScaleOnly = TRUE,
+                                 latents = "X", pfOptimizeNparticles = TRUE))
+
+  #add toggle Y sampler.
+  for (i in 1:block$constant$YN) {
+    conf$addSampler(paste0("y.censored[", i, "]"), 'toggle', control=list(type='RW'))
+  }
+  conf$printSamplers()
+  #compile MCMC
+  Rmcmc <- nimble::buildMCMC(conf)
+  Cmodel <- nimble::compileNimble(model_pred)
+  Cmcmc <- nimble::compileNimble(Rmcmc, project = model_pred, showCompilerOutput = FALSE)
+
+  #if we don't have any NA in the Y.
+  if (!any(is.na(block$data$y.censored))) {
+    #turn the toggle samplers off for the fully observed y values.
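+    #(toggle value 0 disables the RW sampler on y.censored[i], so observed
+    # values are treated as data rather than being sampled)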
+    for(i in 1:block$constant$YN) {
+      valueInCompiledNimbleFunction(Cmcmc$samplerFunctions[[samplerNumberOffset+i]], 'toggle', 0)
+    }
+  }
+
+  #run MCMC
+  dat <- runMCMC(Cmcmc, niter = block$MCMC$niter, nburnin = block$MCMC$nburnin, thin = block$MCMC$nthin, nchains = block$MCMC$nchain)
+  #update aq, bq, mua, and pa
+  M <- colMeans(dat)
+  block$update$aq <- block$Inits$q
+  if (block$constant$q.type == 3) {
+    #if it's a vector q case
+    aq <- bq <- rep(NA, length(block$data$y.censored))
+    for (i in seq_along(aq)) {
+      CHAR <- paste0("[", i, "]")
+      aq[i] <- (mean(dat[, paste0("q", CHAR)]))^2/stats::var(dat[, paste0("q", CHAR)])
+      bq[i] <- mean(dat[, paste0("q", CHAR)])/stats::var(dat[, paste0("q", CHAR)])
+    }
+    #update aqq and bqq
+    block$aqq[,block$t+1] <- block$aqq[, block$t]
+    block$aqq[block$constant$H, block$t+1] <- aq
+    block$bqq[,block$t+1] <- block$bqq[, block$t]
+    block$bqq[block$constant$H, block$t+1] <- bq
+  } else if (block$constant$q.type == 4) {
+    #previous updates
+    mq <- dat[, grep("q", colnames(dat))] # Omega, Precision
+    q.bar <- matrix(apply(mq, 2, mean),
+                    length(block$constant$H),
+                    length(block$constant$H)
+    )
+    wish.df <- function(Om, X, i, j, col) {
+      (Om[i, j]^2 + Om[i, i] * Om[j, j]) / stats::var(X[, col])
+    }
+    col <- matrix(1:length(block$constant$H) ^ 2,
+                  length(block$constant$H),
+                  length(block$constant$H))
+    WV <- matrix(0, length(block$constant$H), length(block$constant$H))
+    for (i in seq_along(block$constant$H)) {
+      for (j in seq_along(block$constant$H)) {
+        WV[i, j] <- wish.df(q.bar, X = mq, i = i, j = j, col = col[i, j])
+      }
+    }
+    bq <- mean(WV)
+    if (bq < block$constant$YN) {
+      bq <- block$constant$YN
+    }
+    aq <- solve(q.bar) * bq
+    block$aqq[,,block$t+1] <- GrabFillMatrix(block$aqq[,,block$t], block$constant$H, aq)
+    block$bqq[block$t+1] <- bq
+  }
+  #update mua and pa; mufa, and pfa
+  iX <- grep("X[", colnames(dat), fixed = TRUE)
+  iX.mod <- grep("X.mod[", colnames(dat), fixed = TRUE)
+  if (length(iX) == 1) {
+    mua <- mean(dat[, iX])
+    pa <- stats::var(dat[, iX])
+  } else {
+    mua <- colMeans(dat[, iX])
+    pa <- stats::cov(dat[, iX])
+  }
+
+  if (length(iX.mod) == 1) {
+    mufa <- mean(dat[, iX.mod])
+    pfa <- stats::var(dat[, iX.mod])
+  } else {
+    mufa <- colMeans(dat[, iX.mod])
+    pfa <- stats::cov(dat[, iX.mod])
+  }
+
+  #return values.
+  block$update <- list(aq = aq, bq = bq, mua = mua, pa = pa, mufa = mufa, pfa = pfa)
+  return(block)
+}
+
+##' @title update_q
+##' @name update_q
+##' @author Dongchen Zhang
+##'
+##' @param block.list.all each block within the `block.list` lists.
+##' @param t time point.
+##' @param nt total length of time steps.
+##' @param aqq.Init the initial values of aqq, the default is NULL.
+##' @param bqq.Init the initial values of bqq, the default is NULL.
+##' @param MCMC_dat data frame of MCMC samples, the default is NULL.
+##' @param block.list.all.pre pre-existing block.list.all object for passing the aqq and bqq to the current SDA run, the default is NULL.
+##'
+##' @return It returns the `block.list.all` object with initialized/updated Q filled in.
+update_q <- function (block.list.all, t, nt, aqq.Init = NULL, bqq.Init = NULL, MCMC_dat = NULL, block.list.all.pre = NULL) {
+  block.list <- block.list.all[[t]]
+  #if no pre-computed MCMC samples are provided.
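+  #(at t == 1, aqq and bqq are initialized from the settings or defaults; for
+  # t > 1 they are carried over from the previous time step or a previous run)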
+  if (is.null(MCMC_dat)) {
+    #loop over blocks
+    if (t == 1) {
+      for (i in seq_along(block.list)) {
+        nvar <- length(block.list[[i]]$data$muf)
+        nobs <- length(block.list[[i]]$data$y.censored)
+        if (block.list[[i]]$constant$q.type == 3) {
+          #initialize aqq and bqq for nt
+          if (!is.null(aqq.Init) && !is.null(bqq.Init)) {
+            block.list[[i]]$aqq <- array(aqq.Init, dim = c(nvar, nt + 1))
+            block.list[[i]]$bqq <- array(bqq.Init, dim = c(nvar, nt + 1))
+          } else {
+            block.list[[i]]$aqq <- array(1, dim = c(nvar, nt + 1))
+            block.list[[i]]$bqq <- array(1, dim = c(nvar, nt + 1))
+          }
+          #update aq and bq based on aqq and bqq
+          block.list[[i]]$data$aq <- block.list[[i]]$aqq[block.list[[i]]$constant$H, t]
+          block.list[[i]]$data$bq <- block.list[[i]]$bqq[block.list[[i]]$constant$H, t]
+        } else if (block.list[[i]]$constant$q.type == 4) {
+          #initialize aqq and bqq for nt
+          block.list[[i]]$aqq <- array(1, dim = c(nvar, nvar, nt + 1))
+          block.list[[i]]$aqq[,,t] <- stats::toeplitz((nvar:1)/nvar)
+          block.list[[i]]$bqq <- rep(nobs, nt + 1)
+          #update aq and bq based on aqq and bqq
+          block.list[[i]]$data$aq <- GrabFillMatrix(block.list[[i]]$aqq[,,t], block.list[[i]]$constant$H)
+          block.list[[i]]$data$bq <- block.list[[i]]$bqq[t]
+        }
+      }
+    } else if (t > 1) {
+      if (!is.null(block.list.all.pre)) {
+        #if we want to update q from a previous SDA run.
+        block.list.pre <- block.list.all.pre[[t - 1]]
+      } else {
+        #otherwise carry q over from the previous time step of this run.
+        block.list.pre <- block.list.all[[t - 1]]
+      }
+      for (i in seq_along(block.list)) {
+        nvar <- length(block.list[[i]]$data$muf)
+        nobs <- length(block.list[[i]]$data$y.censored)
+        if (block.list[[i]]$constant$q.type == 3) {
+          #copy previous aqq and bqq to the current t
+          block.list[[i]]$aqq <- block.list.pre[[i]]$aqq
+          block.list[[i]]$bqq <- block.list.pre[[i]]$bqq
+          #update aq and bq
+          block.list[[i]]$data$aq <- block.list[[i]]$aqq[block.list[[i]]$constant$H, t]
+          block.list[[i]]$data$bq <- block.list[[i]]$bqq[block.list[[i]]$constant$H, t]
+        } else if (block.list[[i]]$constant$q.type == 4) {
+          #initialize aqq and bqq for nt
+          block.list[[i]]$aqq <- block.list.pre[[i]]$aqq
+          block.list[[i]]$bqq <- block.list.pre[[i]]$bqq
+          #if previous Q is smaller than the actual YN.
+          if (block.list.pre[[i]]$bqq[t] <= block.list[[i]]$constant$YN) {
+            block.list[[i]]$bqq[t] <- block.list[[i]]$constant$YN
+          }
+          #update aq and bq based on aqq and bqq
+          block.list[[i]]$data$aq <- GrabFillMatrix(block.list[[i]]$aqq[,,t], block.list[[i]]$constant$H)
+          block.list[[i]]$data$bq <- block.list[[i]]$bqq[t]
+        }
+      }
+    }
+  } else {
+    #TODO: Implement the feature that Q can be updated based on the pft types.
+  }
+
+  #return values.
+  block.list.all[[t]] <- block.list
+  return(block.list.all)
+}
+
+##' @title block.2.vector
+##' @name block.2.vector
+##' @author Dongchen Zhang
+##'
+##' @param block.list lists of blocks generated by the `build.block.xy` function.
+##' @param X A matrix containing ensemble forecasts.
+##' @param H H index created by the `construct_nimble_H` function.
+##'
+##' @return It returns a list of analysis results (mu.f, Pf, mu.a, Pa) assembled from the block MCMC samples.
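+#(mu.f and Pf are filled from the MCMC means of X.mod for every block, while
+# mu.a and Pa are additionally overwritten from X at the observed indices H$H.ind)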
+block.2.vector <- function (block.list, X, H) { + site.ids <- attributes(X)$Site + mu.f <- mu.a <- c() + Pf <- Pa <- matrix(0, length(site.ids), length(site.ids)) + for (L in block.list) { + ind <- c() + for (id in L$site.ids) { + ind <- c(ind, which(site.ids == id)) + } + #convert mu.f and pf + mu.a[ind] <- mu.f[ind] <- L$update$mufa + Pa[ind, ind] <- Pf[ind, ind] <- L$update$pfa + #convert mu.a and pa + ind <- intersect(ind, H$H.ind) + mu.a[ind] <- L$update$mua + Pa[ind, ind] <- L$update$pa + } + return(list(mu.f = mu.f, + Pf = Pf, + mu.a = mu.a, + Pa = Pa)) +} \ No newline at end of file diff --git a/modules/assim.sequential/R/Analysis_sda_multiSite.R b/modules/assim.sequential/R/Analysis_sda_multiSite.R index d832a7b41d8..9e38a466e6f 100644 --- a/modules/assim.sequential/R/Analysis_sda_multiSite.R +++ b/modules/assim.sequential/R/Analysis_sda_multiSite.R @@ -15,7 +15,7 @@ ##' ##' @return It returns a list with estimated mean and cov matrix of forecast state variables as well as mean and cov estimated as a result of assimilation/analysis . ##' @export -EnKF.MultiSite <-function(settings, Forecast, Observed, H, extraArg=NULL, ...){ +EnKF.MultiSite <- function(settings, Forecast, Observed, H, extraArg=NULL, ...){ #------------------------------Setup Localization.FUN <- settings$state.data.assimilation$Localization.FUN # localization function scalef <- settings$state.data.assimilation$scalef %>% as.numeric() # scale factor for localization @@ -72,7 +72,7 @@ EnKF.MultiSite <-function(settings, Forecast, Observed, H, extraArg=NULL, ...){ ##' @rdname GEF ##' @export -GEF.MultiSite<-function(settings, Forecast, Observed, H, extraArg,...){ +GEF.MultiSite <- function(settings, Forecast, Observed, H, extraArg,...){ #-- reading the dots and exposing them to the inside of the function dots<-list(...) if (length(dots) > 0) lapply(names(dots),function(name){assign(name,dots[[name]], pos = 1 )}) @@ -133,128 +133,11 @@ GEF.MultiSite<-function(settings, Forecast, Observed, H, extraArg,...){ # if we had censored data and we don't have pre-calculated Pf. 
###-------------------------------------------------------------------###---- if (censored.data && is.null(extraArg$Pf)) { - intervalX <- matrix(NA, ncol(X), 2) - rownames(intervalX) <- colnames(X) - outdir <- settings$modeloutdir - #TO DO: Not working for fcomp - for (i in 1:length(var.names)) { - intervalX[which(startsWith(rownames(intervalX), - var.names[i])),] <- - matrix(c( - as.numeric( - settings$state.data.assimilation$state.variables[[i]]$min_value - ), - as.numeric( - settings$state.data.assimilation$state.variables[[i]]$max_value - ) - ), - length(which(startsWith( - rownames(intervalX), - var.names[i] - ))), 2, byrow = TRUE) - - } - #### These vectors are used to categorize data based on censoring from the interval matrix - x.ind <- x.censored <- matrix(NA, ncol = ncol(X), nrow = nrow(X)) - for (j in seq_along(mu.f)) { - for (n in seq_len(nrow(X))) { - x.ind[n, j] <- as.numeric(X[n, j] > 0) - x.censored[n, j] <- - as.numeric(ifelse(X[n, j] > intervalX[j, 2], 0, X[n, j])) # - } - } - - if (t == 1) { - #The purpose of this step is to impute data for mu.f - #where there are zero values so that - #mu.f is in 'tobit space' in the full model - diag(Pf)[which(diag(Pf)<0.1)] <- min(diag(Pf)[which(diag(Pf) >= 0.1)]) #fixing det(Pf)==0 - constants.tobit2space = list(N = nrow(X), - J = length(mu.f)) - data.tobit2space <- list(y.ind = x.ind, - y.censored = x.censored, - mu_0 = rep(0,length(mu.f)), - lambda_0 = solve(diag(1000,length(mu.f))), #can try solve - nu_0 = ncol(X)+1, - wts = wts*nrow(X), #sigma x2 max Y - Sigma_0 = solve(diag(1000,length(mu.f))))#some measure of prior obs - inits.tobit2space <<- - list(pf = Pf, muf = colMeans(X)) #pf = cov(X) - #set.seed(0) - #ptm <- proc.time() - tobit2space_pred <<- - nimbleModel( - tobit2space.model, - data = data.tobit2space, - constants = constants.tobit2space, - inits = inits.tobit2space, - name = 'space' - ) - ## Adding X.mod,q,r as data for building model. - conf_tobit2space <<- - configureMCMC(tobit2space_pred, thin = 10, print = TRUE) - conf_tobit2space$addMonitors(c("pf", "muf", "y.censored")) - ## important! 
-    ## this is needed for correct indexing later
-    samplerNumberOffset_tobit2space <<-
-      length(conf_tobit2space$getSamplers())
-
-    for (j in seq_along(mu.f)) {
-      for (n in seq_len(nrow(X))) {
-        node <- paste0('y.censored[', n, ',', j, ']')
-        conf_tobit2space$addSampler(node, 'toggle', control = list(type =
-                                                                     'RW'))
-      }
-    }
-
-    #conf_tobit2space$printSamplers()
-
-    Rmcmc_tobit2space <<- buildMCMC(conf_tobit2space)
-
-    Cmodel_tobit2space <<- compileNimble(tobit2space_pred)
-    Cmcmc_tobit2space <<-
-      compileNimble(Rmcmc_tobit2space, project = tobit2space_pred)
-
-    for (i in seq_along(X)) {
-      ## ironically, here we have to "toggle" the value of y.ind[i]
-      ## this specifies that when y.ind[i] = 1,
-      ## indicator variable is set to 0, which specifies *not* to sample
-      valueInCompiledNimbleFunction(Cmcmc_tobit2space$samplerFunctions[[samplerNumberOffset_tobit2space +i]],
-                                    'toggle',1 - x.ind[i])
-    }
-
-  } else{
-    Cmodel_tobit2space$y.ind <- x.ind
-    Cmodel_tobit2space$y.censored <- x.censored
-
-    inits.tobit2space = list(pf = Pf, muf = colMeans(X))
-    Cmodel_tobit2space$setInits(inits.tobit2space)
-
-    for (i in seq_along(X)) {
-      valueInCompiledNimbleFunction(Cmcmc_tobit2space$samplerFunctions[[samplerNumberOffset_tobit2space +i]],
-                                    'toggle',1 - x.ind[i])
-    }
-
-  }
-
-  dat.tobit2space <-
-    runMCMC(Cmcmc_tobit2space,
-            nchains = 1,
-            niter = 50000,
-            progressBar = TRUE)
-  #dat.tobit2space <- do.call(rbind, dat.tobit2space)
-  save(dat.tobit2space, file = file.path(settings$outdir, paste0('censored',t,'.Rdata')))
-  ## update parameters
-  mu.f <-
-    colMeans(dat.tobit2space[, grep("muf", colnames(dat.tobit2space))])
-  Pf <-
-    matrix(colMeans(dat.tobit2space[, grep("pf", colnames(dat.tobit2space))]), ncol(X), ncol(X))
-
-
-
-  iycens <- grep("y.censored", colnames(dat.tobit2space))
-  X.new <-
-    matrix(colMeans(dat.tobit2space[, iycens]), nrow(X), ncol(X))
+      out.cens <- tobit_model_censored(settings, X, var.names, mu.f, Pf, t)
+      mu.f <- out.cens$mu.f
+      Pf <- out.cens$Pf
+      iycens <- out.cens$iycens
+      X.new <- out.cens$X.new
   } # end of if we have censored data
###-------------------------------------------------------------------###
@@ -284,9 +167,9 @@ GEF.MultiSite<-function(settings, Forecast, Observed, H, extraArg,...){
         purrr::map('site') %>%
         purrr::map('site.pft') %>%
         purrr::map('pft.name') %>%
-        modify(as.factor) %>%
-        modify(as.numeric) %>%
-        modify_if(function(x) {
+        purrr::modify(as.factor) %>%
+        purrr::modify(as.numeric) %>%
+        purrr::modify_if(function(x) {
           if (length(x) > 0) {
             return(FALSE)
           } else{
@@ -346,138 +229,22 @@ GEF.MultiSite<-function(settings, Forecast, Observed, H, extraArg,...){
     #### from the interval matrix
     y.ind <- as.numeric(Y > interval[, 1])
     y.censored <- as.numeric(ifelse(Y > interval[, 1], Y, 0))
-    recompileGEF <- extraArg$recompileGEF
-    if(t > 1){
-      if(length(extraArg$pre_elements) != length(elements.W.Data)){
-        recompileGEF <- TRUE
-      }
-    }
-    if(t == 1 | recompileGEF){ #TO DO need to make something that works to pick whether to compile or not
-      # initial Q depends on the size of aqq
-      #Initial values
-      inits.pred <-
-        list(
-          X.mod = as.vector(mu.f),
-          X = as.vector(mu.f)[elements.W.Data],
-          Xall = as.vector(mu.f),
-          Xs = as.vector(mu.f)[elements.W.Data],
-          q = diag(1, length(elements.W.Data), length(elements.W.Data))
-        ) #
-      dimensions.tobit = list(X = length(elements.W.Data),
-                              X.mod = ncol(X),
-                              Q = c(nrow(aqq), ncol(aqq))
-      )
-      # Contants defined in the model
-      constants.tobit <-
-        list(
-          N = ncol(X),
-          YN = length(elements.W.Data),
-          nH = length(elements.W.Data),
-          H = elements.W.Data,
-          NotH = which(!(1:ncol(X) %in% elements.W.Data )),
-          nNotH = which(!(1:ncol(X) %in% elements.W.Data )) %>% length(),
-          q.type=q.type
-        )
-      # Data used for setting the likelihood and other stuff
-      data.tobit <-
-        list(
-          muf = as.vector(mu.f),
-          pf = Pf,
-          aq = aqq[,,t],
-          bq = bqq[t],
-          y.ind = y.ind,
-          y.censored = y.censored,
-          r = solve(R)
-        )
-
-      # This is the first step in making the nimble model - Nimble does some preliminary checks on the code
-      #special case for YN == 1 to run nimble model without for loops around nH
-      if(constants.tobit$YN == 1){
-        #add error message if trying to run SDA with 1 obs and 1 state variable no model currently exists to handle this case, need to remove for loop from GEF_singleobs_nimble for this case and save new model
-        if(constants.tobit$N == 1){
-          PEcAn.logger::logger.error("No model exists for assimilating 1 observation and 1 state variable, add more state variables or edit GEF_singleobs_nimble to work with 1 state variable")
-        }
-        #slight adjustment to inputs for nimble function when running with 1 obs
-        inits.pred$qq <- 0.368
-        dimensions.tobit$y.censored <- 1
-        dimensions.tobit$y.ind <- 1
-        constants.tobit$q.type <- NULL
-
-        model_pred <- nimbleModel(GEF_singleobs_nimble,
-                                  data = data.tobit,
-                                  dimensions = dimensions.tobit,
-                                  constants = constants.tobit,
-                                  inits = inits.pred,
-                                  name = 'base')
-      }else{
-        model_pred <- nimbleModel(GEF.MultiSite.Nimble,
-                                  data = data.tobit,
-                                  dimensions = dimensions.tobit,
-                                  constants = constants.tobit,
-                                  inits = inits.pred,
-                                  name = 'base')
-      }
-      model_pred$initializeInfo()
-      ## Adding X.mod,q,r as data for building model.
-      conf <- configureMCMC(model_pred, print=TRUE)
-
-      conf$addMonitors(c("X","Xall","q","Xs"))
-      samplerNumberOffset <<- length(conf$getSamplers())
-
-      for(i in 1:length(y.ind)) {
-        node <- paste0('y.censored[',i,']')
-        conf$addSampler(node, 'toggle', control=list(type='RW'))
-      }
-
-      conf$printSamplers()
-      Rmcmc <<- buildMCMC(conf)
-      Cmodel <<- compileNimble(model_pred)
-      Cmcmc <<- compileNimble(Rmcmc, project = model_pred, showCompilerOutput = TRUE)
-
-      for(i in 1:length(y.ind)) {
-        valueInCompiledNimbleFunction(Cmcmc$samplerFunctions[[samplerNumberOffset+i]], 'toggle', 1-y.ind[i])
-      }
-      save(
-        inits.pred,
-        dimensions.tobit,
-        constants.tobit,
-        data.tobit,
-        model_pred,
-        conf,
-        Rmcmc,
-        Cmodel,
-        Cmcmc,
-        file = file.path(settings$outdir,"NimbleVars.RData")
-      )
-      # if t>1 in GEF --------------------------------------------
-    } else {
-
-      Cmodel$y.ind <- y.ind
-      Cmodel$y.censored <- y.censored
-      Cmodel$aq <- aqq[ , ,t]
-      Cmodel$bq <- bqq[t]
-      Cmodel$muf <- mu.f
-      Cmodel$pf <- Pf
-      Cmodel$r <- solve(R)
-      inits.pred <-
-        list(
-          X.mod = as.vector(mu.f),
-          X = as.vector(mu.f)[elements.W.Data],
-          Xall = as.vector(mu.f),
-          Xs = as.vector(mu.f)[elements.W.Data],
-          q = diag(1, length(elements.W.Data), length(elements.W.Data))
-        ) #
-      Cmodel$setInits(inits.pred)
-
-      for(i in 1:length(y.ind)) {
-        ## ironically, here we have to "toggle" the value of y.ind[i]
-        ## this specifies that when y.ind[i] = 1,
-        ## indicator variable is set to 0, which specifies *not* to sample
-        valueInCompiledNimbleFunction(Cmcmc$samplerFunctions[[samplerNumberOffset+i]], 'toggle', 1-y.ind[i])
-      }
-
-    }
-    dat <- runMCMC(Cmcmc, niter = nitr.GEF, nburnin = nburnin, thin = nthin, nchains = 1)
+    data <- list(elements.W.Data = elements.W.Data,
+                 X = X,
+                 Pf = Pf,
+                 aqq = aqq,
+                 bqq = bqq,
+                 t = t, #time index; MCMC_function may run on parallel workers and needs t inside `data`
+                 mu.f = mu.f,
+                 q.type = q.type,
+                 R = R,
+                 y.censored = y.censored,
+                 y.ind = y.ind,
+                 nitr.GEF = extraArg$nitr.GEF,
+                 nburnin = extraArg$nburnin,
+                 nthin = extraArg$nthin,
+                 monitors = c("Xall", "qq"))
+    outputs <- furrr::future_map(replicate(as.numeric(settings$state.data.assimilation$chains),
+                                           data, simplify = FALSE),
+                                 MCMC_function)
+    dat <- do.call(rbind, outputs)
 
     #---- Saving the chains
     save(dat, file=file.path(settings$outdir, paste0('dat',t,'.Rdata')))
@@ -496,8 +263,9 @@ GEF.MultiSite<-function(settings, Forecast, Observed, H, extraArg,...){
     # Setting up the prior for the next step from the posterior of this step
     if (t < nt){
       if (q.type == single.q){ #if it's a gamma case
-        aqq[1, 1, t + 1] <- mean(mq)
-        bqq[t + 1] <- stats::var(mq %>% as.numeric())
+        qq <- dat[, grep("qq", colnames(dat))]
+        aqq[1, 1, t + 1] <- (mean(qq))^2/stats::var(qq)
+        bqq[t + 1] <- mean(qq)/stats::var(qq)
       } else { # if it's a wish case
         col <- matrix(1:length(elements.W.Data) ^ 2,
                       length(elements.W.Data),
@@ -528,10 +296,109 @@ GEF.MultiSite<-function(settings, Forecast, Observed, H, extraArg,...){
       n = n,
       X.new=X.new,
       aqq=aqq,
-      bqq=bqq,
-      elements.W.Data=elements.W.Data
+      bqq=bqq
     )
   )
 }
-
+##' @title MCMC_function
+##' @author Michael Dietze \email{dietze@@bu.edu}, Ann Raiho, Hamze Dokoohaki, and Dongchen Zhang.
+##' @param data list containing everything needed for the MCMC sampling.
+##' @details This function replaces the previously inlined MCMC sampling code and allows multiple MCMC chains to be sampled in parallel.
+##' @return It returns the matrix of MCMC samples for the monitored variables.
+MCMC_function <- function(data){
+  dimensions.tobit <- list(X = length(data$elements.W.Data),
+                           X.mod = ncol(data$X),
+                           Q = c(nrow(data$aqq), ncol(data$aqq))
+  )
+  # Constants defined in the model
+  constants.tobit <-
+    list(
+      N = ncol(data$X),
+      YN = length(data$elements.W.Data),
+      nH = length(data$elements.W.Data),
+      H = data$elements.W.Data,
+      NotH = which(!(1:ncol(data$X) %in% data$elements.W.Data)),
+      nNotH = which(!(1:ncol(data$X) %in% data$elements.W.Data)) %>% length(),
+      q.type=data$q.type
+    )
+  # Data used for setting the likelihood and other stuff
+  data.tobit <-
+    list(
+      muf = as.vector(data$mu.f),
+      pf = data$Pf,
+      aq = data$aqq[,,data$t],
+      bq = data$bqq[data$t],
+      y.ind = data$y.ind,
+      y.censored = data$y.censored,
+      r = solve(data$R)
+    )
+  if(constants.tobit$YN == 1){
+    #error out when trying to run SDA with 1 obs and 1 state variable: no model currently exists
+    #for this case; GEF_singleobs_nimble would need its for loop removed and a new model saved
+    if(constants.tobit$N == 1){
+      PEcAn.logger::logger.error("No model exists for assimilating 1 observation and 1 state variable, add more state variables or edit GEF_singleobs_nimble to work with 1 state variable")
+      return(0)
+    }
+    #slight adjustment to inputs for the nimble function when running with 1 obs
+    dimensions.tobit$y.censored <- 1
+    dimensions.tobit$y.ind <- 1
+    constants.tobit$q.type <- NULL
+    inits.pred <-
+      list(
+        X.mod = as.vector(data$mu.f),
+        X = as.vector(data$mu.f)[data$elements.W.Data],
+        Xall = as.vector(data$mu.f),
+        Xs = as.vector(data$mu.f)[data$elements.W.Data],
+        q = diag(1, length(data$elements.W.Data), length(data$elements.W.Data))
+      )
+    #qq must be set after inits.pred is created; assigning it first would be overwritten by the list() call
+    inits.pred$qq <- 0.368
+    model_pred <- nimble::nimbleModel(GEF_singleobs_nimble,
+                                      data = data.tobit,
+                                      dimensions = dimensions.tobit,
+                                      constants = constants.tobit,
+                                      inits = inits.pred,
+                                      name = 'base')
+  }else{
+    model_pred <- nimble::nimbleModel(GEF.MultiSite.Nimble,
+                                      data = data.tobit,
+                                      dimensions = dimensions.tobit,
+                                      constants = constants.tobit,
+                                      name = 'base')
+  }
+  ## configure the MCMC: set monitors and samplers.
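+  ## a toggle sampler is attached to every y.censored node below; the toggles
+  ## are switched afterwards so that only censored observations are sampled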
+  conf <- nimble::configureMCMC(model_pred, print=TRUE)
+  conf$setMonitors(data$monitors)
+  samplerNumberOffset <- length(conf$getSamplers())
+
+  for(i in 1:length(data$y.ind)) {
+    node <- paste0('y.censored[',i,']')
+    conf$addSampler(node, 'toggle', control=list(type='RW'))
+  }
+  #handling samplers (assumes the X.mod sampler is the second one in the list)
+  samplerLists <- conf$getSamplers()
+  samplerLists[[2]]$control <- list(propCov= data$Pf, adaptScaleOnly = TRUE, adaptive = TRUE)
+  conf$setSamplers(samplerLists)
+
+  conf$printSamplers()
+  Rmcmc <- nimble::buildMCMC(conf)
+  Cmodel <- nimble::compileNimble(model_pred)
+  Cmcmc <- nimble::compileNimble(Rmcmc, project = model_pred, showCompilerOutput = TRUE)
+
+  for(i in 1:length(data$y.ind)) {
+    valueInCompiledNimbleFunction(Cmcmc$samplerFunctions[[samplerNumberOffset+i]], 'toggle', 1-data$y.ind[i])
+  }
+  inits <- function(){
+    ind <- sample(seq_len(nrow(data$X)), 1)
+    init_muf <- data$X[ind,]
+    list(X.mod = as.vector(init_muf),
+         X = as.vector(init_muf)[data$elements.W.Data],
+         Xall = as.vector(init_muf),
+         Xs = as.vector(init_muf)[data$elements.W.Data],
+         q = diag(1, length(data$elements.W.Data), length(data$elements.W.Data)))
+  }
+  if(exists("inits.pred", inherits = FALSE)){
+    dat <- runMCMC(Cmcmc, niter = data$nitr.GEF, nburnin = data$nburnin, thin = data$nthin, nchains = 1)
+  }else{
+    dat <- runMCMC(Cmcmc, niter = data$nitr.GEF, nburnin = data$nburnin, thin = data$nthin, nchains = 1, inits = inits)
+  }
+  return(dat)
+}
\ No newline at end of file
diff --git a/modules/assim.sequential/R/Create_Site_PFT_CSV.R b/modules/assim.sequential/R/Create_Site_PFT_CSV.R
index e41cb2e6101..3f75b894038 100644
--- a/modules/assim.sequential/R/Create_Site_PFT_CSV.R
+++ b/modules/assim.sequential/R/Create_Site_PFT_CSV.R
@@ -26,7 +26,7 @@ Create_Site_PFT_CSV <- function(settings, Ecoregion, NLCD, con){
   # Bail out if packages in Suggests not available
-  suggests_needed <- c("DBI", "glue", "raster")
+  suggests_needed <- c("glue", "raster")
   suggests_found <- sapply(suggests_needed, requireNamespace, quietly = TRUE)
   if (!all(suggests_found)) {
     PEcAn.logger::logger.error(
@@ -48,8 +48,7 @@ Create_Site_PFT_CSV <- function(settings, Ecoregion, NLCD, con){
   suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", ids = site_ID, .con = con))
-  suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry))
-  suppressWarnings(qry_results <- DBI::dbFetch(qry_results))
+  suppressWarnings(qry_results <- PEcAn.DB::db.query(site_qry, con))
   site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat,
                     lon=qry_results$lon, time_zone=qry_results$time_zone)
diff --git a/modules/assim.sequential/R/GEF_Helper.R b/modules/assim.sequential/R/GEF_Helper.R
new file mode 100644
index 00000000000..9012cc803c5
--- /dev/null
+++ b/modules/assim.sequential/R/GEF_Helper.R
@@ -0,0 +1,148 @@
+#' tobit_model_censored
+#'
+#' @param settings (list) pecan standard settings list.
+#' @param X (numeric) A matrix containing ensemble forecasts (ensembles * variables).
+#' @param var.names (character) variable names.
+#' @param mu.f (numeric) forecast mean values.
+#' @param Pf (numeric) forecast covariance matrix.
+#' @param t (numeric) timestep.
If t=1, initial values are imputed for zero values in mu.f
+#'
+#' @return list with updated mu.f, pf, X, and indication of which y values are censored
+#' @export
+#'
+#' @examples
+#' \dontrun{
+#' # hypothetical call from within an SDA analysis step
+#' out <- tobit_model_censored(settings, X, var.names, mu.f, Pf, t)
+#' }
+tobit_model_censored <- function(settings, X, var.names, mu.f, Pf, t) {
+  intervalX <- matrix(NA, ncol(X), 2)
+  rownames(intervalX) <- colnames(X)
+  outdir <- settings$modeloutdir
+  #TO DO: Not working for fcomp
+  for (i in 1:length(var.names)) {
+    intervalX[which(startsWith(rownames(intervalX),
+                               var.names[i])), ] <-
+      matrix(c(
+        as.numeric(
+          settings$state.data.assimilation$state.variables[[i]]$min_value
+        ),
+        as.numeric(
+          settings$state.data.assimilation$state.variables[[i]]$max_value
+        )
+      ),
+      length(which(startsWith(
+        rownames(intervalX),
+        var.names[i]
+      ))), 2, byrow = TRUE)
+
+  }
+  #### These vectors are used to categorize data based on censoring from the interval matrix
+  x.ind <-
+    x.censored <- matrix(NA, ncol = ncol(X), nrow = nrow(X))
+  for (j in seq_along(mu.f)) {
+    for (n in seq_len(nrow(X))) {
+      x.ind[n, j] <- as.numeric(X[n, j] > 0)
+      x.censored[n, j] <-
+        as.numeric(ifelse(X[n, j] > intervalX[j, 2], 0, X[n, j])) #
+    }
+  }
+
+  if (t == 1) {
+    #The purpose of this step is to impute data for mu.f
+    #where there are zero values so that
+    #mu.f is in 'tobit space' in the full model
+    constants.tobit2space = list(N = nrow(X),
+                                 J = length(mu.f))
+
+    data.tobit2space = list(
+      y.ind = x.ind,
+      y.censored = x.censored,
+      mu_0 = rep(0, length(mu.f)),
+      lambda_0 = diag(10, length(mu.f)),
+      nu_0 = 3
+    )#some measure of prior obs
+
+    inits.tobit2space <<-
+      list(pf = Pf, muf = colMeans(X)) #pf = cov(X)
+    #set.seed(0)
+    #ptm <- proc.time()
+    tobit2space_pred <<-
+      nimbleModel(
+        tobit2space.model,
+        data = data.tobit2space,
+        constants = constants.tobit2space,
+        inits = inits.tobit2space,
+        name = 'space'
+      )
+    ## Adding X.mod,q,r as data for building model.
+    conf_tobit2space <<-
+      configureMCMC(tobit2space_pred, thin = 10, print = TRUE)
+    conf_tobit2space$addMonitors(c("pf", "muf", "y.censored"))
+    ## important!
+ ## this is needed for correct indexing later + samplerNumberOffset_tobit2space <<- + length(conf_tobit2space$getSamplers()) + + for (j in seq_along(mu.f)) { + for (n in seq_len(nrow(X))) { + node <- paste0('y.censored[', n, ',', j, ']') + conf_tobit2space$addSampler(node, 'toggle', control = list(type = + 'RW')) + } + } + + #conf_tobit2space$printSamplers() + + Rmcmc_tobit2space <<- buildMCMC(conf_tobit2space) + + Cmodel_tobit2space <<- compileNimble(tobit2space_pred) + Cmcmc_tobit2space <<- + compileNimble(Rmcmc_tobit2space, project = tobit2space_pred) + + for (i in seq_along(X)) { + ## ironically, here we have to "toggle" the value of y.ind[i] + ## this specifies that when y.ind[i] = 1, + ## indicator variable is set to 0, which specifies *not* to sample + valueInCompiledNimbleFunction(Cmcmc_tobit2space$samplerFunctions[[samplerNumberOffset_tobit2space + + i]], + 'toggle', + 1 - x.ind[i]) + } + + } else{ + Cmodel_tobit2space$y.ind <- x.ind + Cmodel_tobit2space$y.censored <- x.censored + + inits.tobit2space = list(pf = Pf, muf = colMeans(X)) + Cmodel_tobit2space$setInits(inits.tobit2space) + + for (i in seq_along(X)) { + valueInCompiledNimbleFunction(Cmcmc_tobit2space$samplerFunctions[[samplerNumberOffset_tobit2space + + i]], + 'toggle', + 1 - x.ind[i]) + } + + } + + dat.tobit2space <- + runMCMC(Cmcmc_tobit2space, + niter = 50000, + progressBar = TRUE) + + ## update parameters + mu.f <- + colMeans(dat.tobit2space[, grep("muf", colnames(dat.tobit2space))]) + Pf <- + matrix(colMeans(dat.tobit2space[, grep("pf", colnames(dat.tobit2space))]), ncol(X), ncol(X)) + + + + iycens <- grep("y.censored", colnames(dat.tobit2space)) + X.new <- + matrix(colMeans(dat.tobit2space[, iycens]), nrow(X), ncol(X)) + + return(list(mu.f = mu.f, + Pf=Pf, + iycens=iycens, + X.new=X.new + )) + +} \ No newline at end of file diff --git a/modules/assim.sequential/R/Helper.functions.R b/modules/assim.sequential/R/Helper.functions.R index d53c5759c72..9c3ccfcc31f 100644 --- a/modules/assim.sequential/R/Helper.functions.R +++ b/modules/assim.sequential/R/Helper.functions.R @@ -11,15 +11,15 @@ #' outlier.detector.boxplot<-function(X) { X <- X %>% - map(function(X.tmp){ + purrr::map(function(X.tmp){ #X.tmp is all the state variables for each element of the list (site) X.tmp %>% - map_dfc(function(col.tmp){ + purrr::map_dfc(function(col.tmp){ #naive way of finding the outlier - 3 * IQR - OutVals <- boxplot(col.tmp, plot = FALSE)$out + OutVals <- graphics::boxplot(col.tmp, plot = FALSE)$out # if I make this NA then it would stay NA for ever. #bc adjustment uses X to and comes up with new analysis - col.tmp[which((col.tmp %in% OutVals))] <- median(col.tmp, na.rm = TRUE) + col.tmp[which((col.tmp %in% OutVals))] <- stats::median(col.tmp, na.rm = TRUE) col.tmp }) diff --git a/modules/assim.sequential/R/Multi_Site_Constructors.R b/modules/assim.sequential/R/Multi_Site_Constructors.R index c8eeba37af3..88a30bd56fb 100755 --- a/modules/assim.sequential/R/Multi_Site_Constructors.R +++ b/modules/assim.sequential/R/Multi_Site_Constructors.R @@ -6,6 +6,9 @@ ##' @param var.names vector names of state variable names. ##' @param X a matrix of state variables. In this matrix rows represent ensembles, while columns show the variables for different sites. ##' @param localization.FUN This is the function that performs the localization of the Pf matrix and it returns a localized matrix with the same dimensions. +##' @param t not used +##' @param blocked.dis passed to `localization.FUN` +##' @param ... 
passed to `localization.FUN` ##' @description The argument X needs to have an attribute pointing the state variables to their corresponding site. This attribute needs to be called `Site`. ##' At the moment, the cov between state variables at blocks defining the cov between two sites are assumed zero. ##' @return It returns the var-cov matrix of state variables at multiple sites. @@ -33,7 +36,7 @@ Contruct.Pf <- function(site.ids, var.names, X, localization.FUN=NULL, t=1, bloc site.cov.orders <- expand.grid(site.ids,site.ids) %>% dplyr::filter( .data$Var1 != .data$Var2) - for (i in 1:nrow(site.cov.orders)){ + for (i in seq_len(nrow(site.cov.orders))){ # first we need to find out where to put it in the big matrix rows.in.matrix <- which(attr(X,"Site") %in% site.cov.orders[i,1]) cols.in.matrix <- which(attr(X,"Site") %in% site.cov.orders[i,2]) @@ -46,7 +49,7 @@ Contruct.Pf <- function(site.ids, var.names, X, localization.FUN=NULL, t=1, bloc } # if I see that there is a localization function passed to this - I run it by the function. - if (!is.null(localization.FUN)) { + if (!is.null(localization.FUN) && nsite > 1) { pf.matrix.out <- localization.FUN (pf.matrix, blocked.dis, ...) } else{ pf.matrix.out <- pf.matrix @@ -200,3 +203,96 @@ Construct.H.multisite <- function(site.ids, var.names, obs.t.mean){ } H } + +##' @title construct_nimble_H +##' @name construct_nimble_H +##' @author Dongchen Zhang +##' +##' @param site.ids a vector name of site ids +##' @param var.names vector names of state variable names +##' @param obs.t list of vector of means for the time t for different sites. +##' @param pft.path physical path to the pft.csv file. +##' @param by criteria, it supports by variable, site, pft, all, and single Q. +##' +##' @description This function is an upgrade to the Construct.H.multisite function which provides the index by different criteria. +##' +##' @return Returns one vector containing index for which Q to be estimated for which variable, +##' and the other vector gives which state variable has which observation (= element.W.Data). 
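As a reviewer aid, here is a minimal sketch of how this indexing helper might be called. It is illustrative only: the site ids, variable names, and observation vectors are hypothetical, and the shape of the observation list is assumed from the roxygen above.

    # Hypothetical inputs; shapes follow the roxygen documentation above
    site.ids  <- c("646", "647")
    var.names <- c("AbvGrndWood", "LAI")
    obs.t <- list(`646` = c(AbvGrndWood = 112.3),
                  `647` = c(AbvGrndWood = 98.7, LAI = 3.2))
    idx <- construct_nimble_H(site.ids, var.names, obs.t, by = "var")
    # idx$Q.ind: which Q each observed column of the state vector uses
    # idx$H.ind: which columns of the state vector carry observations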
+##' @export +construct_nimble_H <- function(site.ids, var.names, obs.t, pft.path = NULL, by = "single"){ + if ((by == "pft" || by == "block_pft_var") && is.null(pft.path)) { + PEcAn.logger::logger.error("Please provide the pft path.") + return(0) + } + H <- Construct.H.multisite(site.ids, var.names, obs.t) + if (by == "var") { + total_var_name <- rep(var.names, length(site.ids)) + Ind <- rep(0, dim(H)[2]) + for (i in seq_along(var.names)) { + Ind[which(total_var_name == var.names[i])] <- i + } + } else if (by == "site") { + total_site_id <- rep(site.ids, each = length(var.names)) + Ind <- rep(0, dim(H)[2]) + for (i in seq_along(site.ids)) { + Ind[which(total_site_id == site.ids[i])] <- i + } + } else if (by == "pft") { + pft <- utils::read.csv(pft.path) + rownames(pft) <- pft$site + total_site_id <- rep(site.ids, each = length(var.names)) + total_pft <- pft[total_site_id, 2] + Ind <- rep(0, dim(H)[2]) + pft.names <- sort(unique(pft$pft)) + for (i in seq_along(pft.names)) { + Ind[which(total_pft == pft.names[i])] <- i + } + } else if (by == "block_pft_var") { + #by pft + pft <- utils::read.csv(pft.path) + rownames(pft) <- pft$site + total_site_id <- rep(site.ids, each = length(var.names)) + total_pft <- pft[total_site_id, 2] + Ind_pft <- rep(0, dim(H)[2]) + pft.names <- sort(unique(pft$pft)) + for (i in seq_along(pft.names)) { + Ind_pft[which(total_pft == pft.names[i])] <- i + } + #by var + total_var_name <- rep(var.names, length(site.ids)) + Ind_var <- rep(0, dim(H)[2]) + for (i in seq_along(var.names)) { + Ind_var[which(total_var_name == var.names[i])] <- i + } + #by site + total_site_id <- rep(site.ids, each = length(var.names)) + Ind_site <- rep(0, dim(H)[2]) + for (i in seq_along(site.ids)) { + Ind_site[which(total_site_id == site.ids[i])] <- i + } + # #create reference to which block and which var + # #Ind for which site should use which block + # block.index <- var.index <- Ind_site + # for (i in seq_along(Ind_site)) { + # Ind_block[i] <- Ind_pft[i] + # } + } else if (by == "all") { + Ind <- 1:dim(H)[2] + } else if (by == "single") { + Ind <- rep(1, dim(H)[2]) + } else { + PEcAn.logger::logger.error("Couldn't find the proper `by` argument!") + return(0) + } + if (by == "block_pft_var") { + return(list(Ind_pft = Ind_pft[which(apply(H, 2, sum) == 1)], + Ind_site = Ind_site[which(apply(H, 2, sum) == 1)], + Ind_var = Ind_var[which(apply(H, 2, sum) == 1)], + H.ind = which(apply(H, 2, sum) == 1))) + } else { + return(list(Q.ind = Ind[which(apply(H, 2, sum) == 1)], + H.ind = which(apply(H, 2, sum) == 1), + H.matrix = H)) + } + +} \ No newline at end of file diff --git a/modules/assim.sequential/R/Nimble_codes.R b/modules/assim.sequential/R/Nimble_codes.R index 38929c17e25..cc63a7e2968 100644 --- a/modules/assim.sequential/R/Nimble_codes.R +++ b/modules/assim.sequential/R/Nimble_codes.R @@ -176,48 +176,63 @@ tobit.model <- nimbleCode({ #' @format TBD #' @export GEF.MultiSite.Nimble <- nimbleCode({ - if (q.type == 1) { - # Sorting out qs - qq ~ dgamma(aq, bq) ## aq and bq are estimated over time - q[1:YN, 1:YN] <- qq * diag(YN) - } else if (q.type == 2) { - # Sorting out qs - q[1:YN, 1:YN] ~ dwish(R = aq[1:YN, 1:YN], df = bq) ## aq and bq are estimated over time - - } - # X model X.mod[1:N] ~ dmnorm(mean = muf[1:N], cov = pf[1:N, 1:N]) - - for (i in 1:nH) { - tmpX[i] <- X.mod[H[i]] - Xs[i] <- tmpX[i] - } - ## add process error to x model but just for the state variables that we have data and H knows who - X[1:YN] ~ dmnorm(Xs[1:YN], prec = q[1:YN, 1:YN]) - - ## Likelihood - y.censored[1:YN] ~
dmnorm(X[1:YN], prec = r[1:YN, 1:YN]) - - # #puting the ones that they don't have q in Xall - They come from X.model - # # If I don't have data on then then their q is not identifiable, so we use the same Xs as Xmodel - if(nNotH > 0){ - for (j in 1:nNotH) { - tmpXmod[j] <- X.mod[NotH[j]] - Xall[NotH[j]] <- tmpXmod[j] + if (q.type == 1 | q.type == 2) { + if (q.type == 1) {#single Q + # Sorting out qs + qq ~ dgamma(aq, bq) ## aq and bq are estimated over time + q[1:YN, 1:YN] <- qq * diag(YN) + } else if (q.type == 2) {#site Q + # Sorting out qs + q[1:YN, 1:YN] ~ dwish(R = aq[1:YN, 1:YN], df = bq) ## aq and bq are estimated over time } + + for (i in 1:nH) { + tmpX[i] <- X.mod[H[i]] + Xs[i] <- tmpX[i] + } + ## add process error to x model but just for the state variables that we have data and H knows who + X[1:YN] ~ dmnorm(Xs[1:YN], prec = q[1:YN, 1:YN]) + + ## Likelihood + y.censored[1:YN] ~ dmnorm(X[1:YN], prec = r[1:YN, 1:YN]) + + # #puting the ones that they don't have q in Xall - They come from X.model + # # If I don't have data on then then their q is not identifiable, so we use the same Xs as Xmodel + if(nNotH > 0){ + for (j in 1:nNotH) { + tmpXmod[j] <- X.mod[NotH[j]] + Xall[NotH[j]] <- tmpXmod[j] + } + } + } else if (q.type == 3) {#Vector Q + for (i in 1:YN) { + #sample Q. + q[i] ~ dgamma(shape = aq[i], rate = bq[i]) + if (length(H) == 1) { + X[i] ~ dnorm(X.mod[H], sd = 1/sqrt(q[i])) + #likelihood + y.censored[i] ~ dnorm(X[i], sd = 1/sqrt(r[i])) + } else { + #sample latent variable X. + X[i] ~ dnorm(X.mod[H[i]], sd = 1/sqrt(q[i])) + #likelihood + y.censored[i] ~ dnorm(X[i], sd = 1/sqrt(r[i, i])) + } + } + } else if (q.type == 4) {#Wishart Q + #if it's a Wishart Q. + #sample Q. + q[1:YN, 1:YN] ~ dwishart(R = aq[1:YN, 1:YN], df = bq) + #sample latent variable X. + for (i in 1:YN) { + Xs[i] <- X.mod[H[i]] + } + X[1:YN] ~ dmnorm(Xs[1:YN], prec = q[1:YN, 1:YN]) + #likelihood + y.censored[1:YN] ~ dmnorm(X[1:YN], prec = r[1:YN, 1:YN]) } - - #These are the one that they have data and their q can be estimated. 
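Pausing between hunks: a hedged sketch of how the restructured q.type switch might be exercised when the nimble model is built. All names and values below are illustrative placeholders inferred from the model code above, not taken from this PR.

    # Hypothetical constants for a 4-observation problem using a vector Q (q.type = 3)
    constants <- list(N = 10, YN = 4, q.type = 3,
                      aq = rep(1, 4), bq = rep(1, 4),  # per-observation gamma priors
                      H = c(1, 3, 5, 7), nH = 4)
    # model <- nimble::nimbleModel(GEF.MultiSite.Nimble, constants = constants,
    #                              data = list(y.censored = ..., y.ind = ...))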
- for (i in 1:nH) { - tmpXH[i] <- X[i] - Xall[H[i]] <- tmpXH[i] - } - - for (i in 1:YN) { - y.ind[i] ~ dinterval(y.censored[i], 0) - } - }) #sampler_toggle------------------------------------------------------------------------------------------------ @@ -367,4 +382,4 @@ GEF_singleobs_nimble <- nimbleCode({ y.ind[1] ~ dinterval(y.censored[1], 0) -}) +}) \ No newline at end of file diff --git a/modules/assim.sequential/R/Prep_OBS_SDA.R b/modules/assim.sequential/R/Prep_OBS_SDA.R index 46d659c1acd..ff048b3e0ea 100644 --- a/modules/assim.sequential/R/Prep_OBS_SDA.R +++ b/modules/assim.sequential/R/Prep_OBS_SDA.R @@ -31,8 +31,7 @@ Prep_OBS_SDA <- function(settings, out_dir, AGB_dir, Search_Window=30){ suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", ids = site_ID, .con = con)) - suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry)) - suppressWarnings(qry_results <- DBI::dbFetch(qry_results)) + suppressWarnings(qry_results <- PEcAn.DB::db.query(site_qry, con)) site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, lon=qry_results$lon, time_zone=qry_results$time_zone) diff --git a/modules/assim.sequential/R/Remote_helpers.R b/modules/assim.sequential/R/Remote_helpers.R index 9ac479710e4..107d4f8a7f7 100644 --- a/modules/assim.sequential/R/Remote_helpers.R +++ b/modules/assim.sequential/R/Remote_helpers.R @@ -17,14 +17,14 @@ Obs.data.prepare.MultiSite <- function(obs.path, site.ids) { # #Filter all the obs just for the sites we are simulating point_list$median_AGB <- - point_list$median_AGB[[1]] %>% filter(Site_ID %in% site.ids) + point_list$median_AGB[[1]] %>% dplyr::filter(.data$Site_ID %in% site.ids) point_list$stdv_AGB <- - point_list$stdv_AGB[[1]] %>% filter(Site_ID %in% site.ids) + point_list$stdv_AGB[[1]] %>% dplyr::filter(.data$Site_ID %in% site.ids) #Finding the orders site.order <- sapply(site.ids, function(x) - which(point_list$median_AGB$Site_ID %in% x)) %>% unlist() %>% as.numeric() %>% na.omit() + which(point_list$median_AGB$Site_ID %in% x)) %>% unlist() %>% as.numeric() %>% stats::na.omit() #Reordering point_list$median_AGB <- point_list$median_AGB[site.order, ] point_list$stdv_AGB <- point_list$stdv_AGB[site.order, ] @@ -32,28 +32,28 @@ Obs.data.prepare.MultiSite <- function(obs.path, site.ids) { # truning lists to dfs for both mean and cov date.obs <- strsplit(names(point_list$median_AGB), "_")[3:length(point_list$median_AGB)] %>% - map_chr( ~ .x[2]) %>% paste0(., "/12/31") + purrr::map_chr( ~ .x[2]) %>% paste0(., "/12/31") #Making in a format that we need obs.mean <- names(point_list$median_AGB)[3:length(point_list$median_AGB)] %>% - map(function(namesl) { + purrr::map(function(namesl) { ((point_list$median_AGB)[[namesl]] %>% - map( ~ .x %>% as.data.frame %>% `colnames<-`(c('AbvGrndWood'))) %>% - setNames(site.ids[1:length(.)]) + purrr::map( ~ .x %>% as.data.frame %>% `colnames<-`(c('AbvGrndWood'))) %>% + stats::setNames(site.ids[1:length(.)]) ) - }) %>% setNames(date.obs) + }) %>% stats::setNames(date.obs) obs.cov <- names(point_list$stdv_AGB)[3:length(point_list$median_AGB)] %>% - map(function(namesl) { + purrr::map(function(namesl) { ((point_list$stdv_AGB)[[namesl]] %>% - map(~ (.x) ^ 2 %>% as.matrix()) %>% - setNames(site.ids[1:length(.)])) + purrr::map(~ (.x) ^ 2 %>% as.matrix()) %>% + stats::setNames(site.ids[1:length(.)])) - }) %>% setNames(date.obs) + }) %>% stats::setNames(date.obs) @@ -67,6 +67,7 @@ 
Obs.data.prepare.MultiSite <- function(obs.path, site.ids) { #' #' @param settingPath The Path to the setting that will run SDA #' @param ObsPath Path to the obs data which is expected to be an .Rdata. +#' @param run.bash.args Shell commands to be run on the remote host before launching the SDA. See examples #' #' @export #' @return This function returns a list of two pieces of information. One the remote path that SDA is running and the PID of the active run. @@ -95,11 +96,11 @@ SDA_remote_launcher <-function(settingPath, ObsPath, run.bash.args){ - future::plan(future::multiprocess) + future::plan(future::multisession) #--------------------------------------------------------------- # Reading the settings #--------------------------------------------------------------- - settings <- read.settings(settingPath) + settings <- PEcAn.settings::read.settings(settingPath) my_host <- list(name =settings$host$name , tunnel = settings$host$tunnel, user=settings$host$user) local_path <-settings$outdir if (is.null(run.bash.args)) run.bash.args <-"" @@ -109,7 +110,7 @@ SDA_remote_launcher <-function(settingPath, if (is.null(settings$host$folder)) { PEcAn.logger::logger.severe("You need to specify the tag in the tag inside your pecan xml !") PEcAn.logger::logger.severe("The tag is a path which points to where you want to store/run your sda job on the remote machine. ") - } else if (!test_remote(my_host)) { + } else if (!PEcAn.remote::test_remote(my_host)) { PEcAn.logger::logger.severe("There is something wrong with your tunnel !") PEcAn.logger::logger.severe("You can learn more about how to setup your tunnel by checking out the `Remote execution with PEcAn` section in the documentation.") @@ -137,7 +138,7 @@ SDA_remote_launcher <-function(settingPath, folder_name<-"SDA" folder_name <- paste0(c("SDA",fname_p1,fname_p2), collapse = "_") #creating a folder on remote - out <- remote.execute.R(script=paste0("dir.create(\"/",settings$host$folder,"//",folder_name,"\")"), + out <- PEcAn.remote::remote.execute.R(script=paste0("dir.create(\"/",settings$host$folder,"//",folder_name,"\")"), host = my_host, user = my_host$user, scratchdir = ".") @@ -146,7 +147,7 @@ SDA_remote_launcher <-function(settingPath, #--------------------------------------------------------------- # test to see samples.Rdata if ("samples.Rdata" %in% list.files(settings$outdir)){ - remote.copy.to( + PEcAn.remote::remote.copy.to( my_host, paste0(settings$outdir,"//","samples.Rdata"), paste0(settings$host$folder,"//",folder_name), @@ -154,7 +155,7 @@ SDA_remote_launcher <-function(settingPath, stderr = FALSE ) } else if("pft" %in% list.dirs(settings$outdir, full.names=F)) {# test for PFT folder - remote.copy.to( + PEcAn.remote::remote.copy.to( my_host, paste0(settings$outdir,"//pft"), paste0(settings$host$folder,"//",folder_name,"//pft"), @@ -167,7 +168,7 @@ SDA_remote_launcher <-function(settingPath, settings$pfts %>% purrr::map('outdir') %>% - walk(function(pft.dir) { + purrr::walk(function(pft.dir) { settings <<- rapply(settings, function(x) ifelse( @@ -191,7 +192,7 @@ SDA_remote_launcher <-function(settingPath, #--------------------------------------------------------------- # testing the obs path and copying over # testing to see if the path exsits on remote if not it should exist on local - test.remote.obs <- remote.execute.R( + test.remote.obs <- PEcAn.remote::remote.execute.R( script = paste0("dir.exists(\"/", ObsPath, "\")"), host = my_host, user = my_host$user, @@ -201,7 +202,7 @@ SDA_remote_launcher <-function(settingPath, # if path 
is not remote then check for the local if (!test.remote.obs) { if (file.exists(ObsPath)) { - remote.copy.to( + PEcAn.remote::remote.copy.to( my_host, ObsPath, paste0(settings$host$folder, "//", folder_name, "//Obs//"), @@ -217,7 +218,7 @@ SDA_remote_launcher <-function(settingPath, # Model binary check #--------------------------------------------------------------- - model.binary.path <- remote.execute.R( + model.binary.path <- PEcAn.remote::remote.execute.R( script = paste0("file.exists(\"/", settings$model$binary, "\")"), host = my_host, user = my_host$user, @@ -230,16 +231,16 @@ SDA_remote_launcher <-function(settingPath, # met check #--------------------------------------------------------------- # Finding all the met paths in your settings - if (is.MultiSettings(settings)){ - input.paths <-settings$run %>% map(~.x[['inputs']] %>% map(~.x[['path']])) %>% unlist() + if (PEcAn.settings::is.MultiSettings(settings)){ + input.paths <-settings$run %>% purrr::map(~.x[['inputs']] %>% purrr::map(~.x[['path']])) %>% unlist() } else { - input.paths <-settings$run$inputs %>% map(~.x[['path']]) %>% unlist() + input.paths <-settings$run$inputs %>% purrr::map(~.x[['path']]) %>% unlist() } # see if we can find those mets on remote missing.inputs <- input.paths %>% - map_lgl(function(.x) { - out <- remote.execute.R( + purrr::map_lgl(function(.x) { + out <- PEcAn.remote::remote.execute.R( script = paste0("file.exists(\"/", .x, "\")"), host = my_host, user = my_host$user, @@ -252,7 +253,7 @@ SDA_remote_launcher <-function(settingPath, # if there some missing inputs, lets create a folder and transfer them if (!any(missing.inputs)){ #creating a folder on remote - out <-remote.execute.R(script=paste0("dir.create(\"/",settings$host$folder,"//",folder_name,"//inputs","\")"), + out <-PEcAn.remote::remote.execute.R(script=paste0("dir.create(\"/",settings$host$folder,"//",folder_name,"//inputs","\")"), host = my_host, user = my_host$user, scratchdir = ".") @@ -268,7 +269,7 @@ SDA_remote_launcher <-function(settingPath, need.copy.dirs %>% purrr::walk( ~ #copy over - remote.copy.to( + PEcAn.remote::remote.copy.to( my_host, .x, file.path(settings$host$folder, folder_name, "inputs"), @@ -317,7 +318,7 @@ SDA_remote_launcher <-function(settingPath, #--------------------------------------------------------------- #Create the scratch dir remote_settings <- settings - out <-remote.execute.R(script=paste0("dir.create(\"/",settings$host$folder,"//",folder_name,"//scratch","\")"), + out <-PEcAn.remote::remote.execute.R(script=paste0("dir.create(\"/",settings$host$folder,"//",folder_name,"//scratch","\")"), host = my_host, user = my_host$user, scratchdir = ".") @@ -338,7 +339,7 @@ SDA_remote_launcher <-function(settingPath, PEcAn.settings::write.settings(remote_settings, basename(settingPath), save.setting.dir) # copying over the settings - remote.copy.to( + PEcAn.remote::remote.copy.to( my_host, file.path(save.setting.dir, basename(settingPath)), file.path(settings$host$folder, folder_name), @@ -349,7 +350,7 @@ SDA_remote_launcher <-function(settingPath, # Copying over the luncher and sending the command #--------------------------------------------------------------- # copying over the luncher - remote.copy.to( + PEcAn.remote::remote.copy.to( my_host, system.file("RemoteLauncher", "SDA_launcher.R", package = "PEcAnAssimSequential"), file.path(settings$host$folder,folder_name), @@ -374,7 +375,7 @@ SDA_remote_launcher <-function(settingPath, unlink(paste0(tmpdir,"/Run.bash")) # delete if there is already one exists 
writeLines(c(bashfile, run.bash.args, cmd), paste0(tmpdir, "/Run.bash")) #copy over the bash file - remote.copy.to( + PEcAn.remote::remote.copy.to( my_host, paste0(tmpdir,"/Run.bash"), paste0(settings$host$folder, "/", folder_name, "/RunBash.sh"), @@ -397,7 +398,7 @@ SDA_remote_launcher <-function(settingPath, ) # Let's see what is the job id of the job doing - out.job.id<-qsub_get_jobid(out = out.job.id[length(out.job.id)], qsub.jobid = settings$host$qsub.jobid, stop.on.error = stop.on.error) + out.job.id<-PEcAn.remote::qsub_get_jobid(out = out.job.id[length(out.job.id)], qsub.jobid = settings$host$qsub.jobid, stop.on.error = stop.on.error) if (length(out.job.id)==0 | is.null(out.job.id)){ PEcAn.logger::logger.severe("Something broke the run before it starts!") @@ -459,7 +460,7 @@ alltocs <-function(fname="tocs.csv") { get(".Data", get(".tictoc", envir = baseenv())) %>% seq_along() %>% - map_dfr(function(x) { + purrr::map_dfr(function(x) { s <- tictoc::toc(quiet = T, log = T) dfout <- data.frame( Task = s$msg %>% as.character(), @@ -468,11 +469,10 @@ alltocs <-function(fname="tocs.csv") { ) return(dfout) }) %>% - mutate(ExecutionTimeP = c(min(TimeElapsed), diff(TimeElapsed))) %>% - write.table( + dplyr::mutate(ExecutionTimeP = c(min(.data$TimeElapsed), diff(.data$TimeElapsed))) %>% + utils::write.table( file = fname, append = T, - sep = ",", row.names = F, col.names = F ) diff --git a/modules/assim.sequential/R/SDA_OBS_Assembler.R b/modules/assim.sequential/R/SDA_OBS_Assembler.R index 9510a2ad89a..315d1fdfb44 100644 --- a/modules/assim.sequential/R/SDA_OBS_Assembler.R +++ b/modules/assim.sequential/R/SDA_OBS_Assembler.R @@ -5,7 +5,7 @@ #' @return list of obs.mean and obs.cov #' @export #' @author Dongchen Zhang -#' @importFrom magrittr %>% +#' @importFrom dplyr %>% #' @importFrom lubridate %m+% #' #' @examples @@ -19,9 +19,24 @@ SDA_OBS_Assembler <- function(settings){ #extract Obs_Prep object from settings. Obs_Prep <- settings$state.data.assimilation$Obs_Prep + #check if we want to proceed the free run without any observations. + if (as.logical(settings$state.data.assimilation$free.run)) { + PEcAn.logger::logger.info("Create obs for free run!") + #calculate time points. + time_points <- obs_timestep2timepoint(Obs_Prep$start.date, Obs_Prep$end.date, Obs_Prep$timestep) + + #generate obs.mean and obs.cov with NULL filled. + obs.mean = vector("list", length(time_points)) %>% `names<-`(time_points) + obs.cov = vector("list", length(time_points)) %>% `names<-`(time_points) + + #save files. + save(obs.mean, file = file.path(Obs_Prep$outdir, "Rdata", "obs.mean.Rdata")) + save(obs.cov, file = file.path(Obs_Prep$outdir, "Rdata", "obs.cov.Rdata")) + return(list(obs.mean = obs.mean, obs.cov = obs.cov)) + } + #prepare site_info offline, because we need to submit this to server remotely, which might not support the Bety connection. 
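For context, a hedged sketch of the settings fragment that drives this new free-run branch; the tag name mirrors the accessor used above, and the values are placeholders.

    # Hypothetical: with this flag set, the assembler skips all observation prep
    settings$state.data.assimilation$free.run <- "TRUE"  # parsed via as.logical() above
    obs <- SDA_OBS_Assembler(settings)
    # names(obs$obs.mean) are the requested time points; every element is NULL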
- site_info <- settings %>% - purrr::map(~.x[['run']] ) %>% + site_info <- settings$run %>% purrr::map('site')%>% purrr::map(function(site.list){ #conversion from string to number @@ -48,6 +63,7 @@ SDA_OBS_Assembler <- function(settings){ if (names(Obs_Prep)[i] %in% c("timestep", "start.date", "end.date", "outdir")){ next }else{ + PEcAn.logger::logger.info(paste("Entering", names(Obs_Prep)[i])) fun_name <- names(Obs_Prep)[i] var_ind <- c(var_ind, i) } @@ -173,12 +189,13 @@ SDA_OBS_Assembler <- function(settings){ for (j in seq_along(obs.mean[[i]])) { if (sum(is.na(obs.mean[[i]][[j]]))){ na_ind <- which(is.na(obs.mean[[i]][[j]])) - obs.mean[[i]][[j]] <- obs.mean[[i]][[j]][-na_ind] + #obs.mean[[i]][[j]] <- obs.mean[[i]][[j]][-na_ind] if(length(obs.mean[[i]][[j]]) == 1){ obs.cov[[i]][[j]] <- obs.cov[[i]][[j]][-na_ind] }else{ obs.cov[[i]][[j]] <- obs.cov[[i]][[j]][-na_ind, -na_ind] } + obs.mean[[i]][[j]] <- obs.mean[[i]][[j]][-na_ind] } SoilC_ind <- which(names(obs.mean[[i]][[j]]) == "TotSoilCarb") if (length(SoilC_ind) > 0){ @@ -200,8 +217,8 @@ SDA_OBS_Assembler <- function(settings){ Obs_Prep[var_ind] %>% purrr::map(~.x$start.date), Obs_Prep[var_ind] %>% purrr::map(~.x$end.date)), function(var_timestep, var_start_date, var_end_date){ - obs_timestep2timepoint(var_start_date, var_end_date, var_timestep) - }) %>% + obs_timestep2timepoint(var_start_date, var_end_date, var_timestep) + }) %>% purrr::map(function(all_timepoints){ all_timepoints[which(!all_timepoints %in% time_points)] }) %>% @@ -243,4 +260,4 @@ SDA_OBS_Assembler <- function(settings){ save(obs.mean, file = file.path(Obs_Prep$outdir, "Rdata", "obs.mean.Rdata")) save(obs.cov, file = file.path(Obs_Prep$outdir, "Rdata", "obs.cov.Rdata")) list(obs.mean = obs.mean, obs.cov = obs.cov) -} +} \ No newline at end of file diff --git a/modules/assim.sequential/R/aggregate.R b/modules/assim.sequential/R/aggregate.R new file mode 100644 index 00000000000..ac54873259f --- /dev/null +++ b/modules/assim.sequential/R/aggregate.R @@ -0,0 +1,73 @@ +#' @title Aggregation Function +#' @name aggregate +#' @author Harunobu Ishii +#' +#' @param downscale_output Raster file output from downscale_function.R. Read file in this way if stored locally: \code{downscale_output <- readRDS("xxx.rds")} +#' @param polygon_data A spatial polygon object (e.g., an `sf` object) that defines the spatial units for aggregation. +#' This data should be in a coordinate reference system compatible with the raster data (e.g., "EPSG:4326"). +#' @param func A character string specifying the aggregation function to use (e.g., 'mean', 'sum'). +#' @details This function will aggregate previously downscaled carbon flux amount to a spatial unit of choice +#' +#' @return It returns the `polygon_data` with added columns for mean and sum values of the aggregated raster data for each ensemble member. +#' @export +#' @examples +#' \dontrun{ +#' # Download a shapefile of U.S. 
(polygon data) +#' url <- "https://www2.census.gov/geo/tiger/GENZ2020/shp/cb_2020_us_state_20m.zip" +#' download.file(url, destfile = "polygon/us_states.zip") +#' +#' # Unzip the downloaded file and save locally +#' unzip("polygon/us_states.zip", exdir = "polygon/us_states") +#' us_states <- st_read("polygon/us_states/cb_2020_us_state_20m.shp") +#' saveRDS(us_states, "polygon/us_states.rds") +#' +#' # Load the saved polygon data with Massachusetts as an example +#' us_states <- readRDS("polygon/us_states.rds") +#' state <- "MA" +#' polygon_data <- st_transform(us_states[us_states$STUSPS == state, ], crs = "EPSG:4326") +#' +#' # Load the downscaled raster output +#' downscale_output <- readRDS("path/to/downscale_output.rds") +#' +#' # Slot in as argument to the aggregate function +#' result <- aggregate(downscale_output, polygon_data) +#' print(result) +#' } + +aggregate <- function(downscale_output, polygon_data, func = 'mean'){ + # check availability of optional packages + missing_pkgs <- c() + if (!requireNamespace("sf", quietly = TRUE)) { + missing_pkgs <- c(missing_pkgs, "sf") + } + if (!requireNamespace("exactextractr", quietly = TRUE)) { + missing_pkgs <- c(missing_pkgs, "exactextractr") + } + if (!requireNamespace("raster", quietly = TRUE)) { + missing_pkgs <- c(missing_pkgs, "raster") + } + if (length(missing_pkgs) > 0) { + PEcAn.logger::logger.severe( + "Package(s)", missing_pkgs, + "needed by PEcAnAssimSequential::aggregate() but not installed") + } + + grand_TTL <- 0 + if (sf::st_crs(downscale_output$maps$ensemble1) != sf::st_crs(polygon_data)) { + stop("CRS of downscale_output and polygon_data must match.") + } + + # Perform spatial operations on each raster + for (name in names(downscale_output$maps)) { + raster_data <- downscale_output$maps[[name]] + agg_values <- exactextractr::exact_extract(raster_data, polygon_data, fun = func) + + polygon_data[[paste0(name, "_", func)]] <- agg_values + grand_TTL = grand_TTL + agg_values + } + if(func == 'mean'){ + grand_TTL = grand_TTL/length(downscale_output$maps) + } + polygon_data[[paste0("TTL_", func)]] <- grand_TTL + return (polygon_data) +} \ No newline at end of file diff --git a/modules/assim.sequential/R/assess.params.R b/modules/assim.sequential/R/assess.params.R index 515f79b38ac..b81eda4b93d 100644 --- a/modules/assim.sequential/R/assess.params.R +++ b/modules/assim.sequential/R/assess.params.R @@ -31,7 +31,7 @@ assessParams <- function(dat, Xt, wts = NULL, mu_f_TRUE = NULL, P_f_TRUE = NULL) if(is.null(wts)){ mufT <- apply(Xt,2,mean) - PfT <- cov(Xt) + PfT <- stats::cov(Xt) }else{ mufT <- apply(Xt,2,stats::weighted.mean,wts) PfT <- stats::cov.wt(Xt,wts)$cov @@ -43,92 +43,92 @@ assessParams <- function(dat, Xt, wts = NULL, mu_f_TRUE = NULL, P_f_TRUE = NULL) } - par(mfrow=c(2,3)) + graphics::par(mfrow=c(2,3)) apply(eigen_save,2,graphics::plot,typ='l',main='Eigen Value') for(i in seq(1,length(iPf),7)){ graphics::plot(dat[,iPf[i]],typ='l',main='Variance of Pf') } for(i in 1:length(muf)){ graphics::plot(dat[,imuf[i]],typ='l',main=paste('muf',i)) - abline(h=mufT[i],col='red') + graphics::abline(h=mufT[i],col='red') } Xt_use <- Xt rownames(Xt_use)<-colnames(Xt_use) <- NULL - corrplot::corrplot(cov2cor((PfT)),main='correlation T') - corrplot::corrplot(cov2cor(cov(Xt_use)),main='correlation estimate') + corrplot::corrplot(stats::cov2cor((PfT)),main='correlation T') + corrplot::corrplot(stats::cov2cor(stats::cov(Xt_use)),main='correlation estimate') - mufCI <- apply(dat[,imuf],2,quantile,c(0.025,0.975)) - mufTCI <- 
apply(Xt,2,quantile,c(0.025,0.975)) + mufCI <- apply(dat[,imuf],2,stats::quantile,c(0.025,0.975)) + mufTCI <- apply(Xt,2,stats::quantile,c(0.025,0.975)) - par(mfrow=c(1,1)) + graphics::par(mfrow=c(1,1)) graphics::plot(mufT,muf,pch=19,ylim=range(mufCI),xlim=range(mufTCI)) - abline(a=0,b=1,lty=2) + graphics::abline(a=0,b=1,lty=2) for(i in 1:length(muf)){ - lines(mufTCI[,i],rep(as.vector(muf)[i],2),col=i,lwd=2) - lines(rep(as.vector(mufT)[i],2),mufCI[,i],col=i,lwd=2) + graphics::lines(mufTCI[,i],rep(as.vector(muf)[i],2),col=i,lwd=2) + graphics::lines(rep(as.vector(mufT)[i],2),mufCI[,i],col=i,lwd=2) } #muf mufT scatter plot - par(mfrow=c(2,2)) + graphics::par(mfrow=c(2,2)) for(i in 1:(length(imuf)-1)){ graphics::plot(dat[,i],dat[,i+1],xlab=paste('mu', i),ylab=paste('mu', i+1)) #points(mu_f_TRUE[i],mu_f_TRUE[i+1],cex=3,col=2,pch=18) - points(muf[i],muf[i+1],cex=3,col=3,pch=19) - points(mufT[i],mufT[i+1],cex=3,col=4,pch=20) + graphics::points(muf[i],muf[i+1],cex=3,col=3,pch=19) + graphics::points(mufT[i],mufT[i+1],cex=3,col=4,pch=20) } - plot.new() - legend("topleft",legend=c("post","sampT"),col=3:4,pch = 19:20) + graphics::plot.new() + graphics::legend("topleft",legend=c("post","sampT"),col=3:4,pch = 19:20) #legend("topleft",legend=c("TRUE","post","sampT"),col=2:4,pch = 18:20) graphics::boxplot(Xt,xlab='State Variables',ylab='X') - points(muf,col='red',pch=19) - legend("topleft",legend=c("muf"),col='red',pch = 19) + graphics::points(muf,col='red',pch=19) + graphics::legend("topleft",legend=c("muf"),col='red',pch = 19) #cor(dat[,1:6]) #iPf <- grep("pf", colnames(dat)) #Pf <- matrix(colMeans(dat[, iPf]),ncol(Xt),ncol(Xt)) - PfCI <- apply(dat[,iPf],2,quantile,c(0.025,0.975)) + PfCI <- apply(dat[,iPf],2,stats::quantile,c(0.025,0.975)) diag.stopper <- diag(length(muf)) - par(mfrow=c(1,1)) + graphics::par(mfrow=c(1,1)) graphics::plot(PfT,Pf,ylim=range(PfCI),pch=19,xlab='Pf Ensemble (True)',ylab='Pf Estimated (tobit2space)') - abline(0,1,lty=2) + graphics::abline(0,1,lty=2) for(i in 1:length(Pf)){ - lines(rep(as.vector(PfT)[i],2),PfCI[,i],col=i,lwd=2) + graphics::lines(rep(as.vector(PfT)[i],2),PfCI[,i],col=i,lwd=2) if(diag.stopper[i]==1){ - points(PfT[i],Pf[i],cex=2,pch = 7) + graphics::points(PfT[i],Pf[i],cex=2,pch = 7) } } - legend('topleft','variance',pch = 7,cex=2) + graphics::legend('topleft','variance',pch = 7,cex=2) diag.stopper2 <- diag.stopper+1 diag(diag.stopper2) <- 0 - graphics::plot(cov2cor(PfT)[which(diag.stopper2==1)], - cov2cor(Pf)[which(diag.stopper2==1)],pch=19, + graphics::plot(stats::cov2cor(PfT)[which(diag.stopper2==1)], + stats::cov2cor(Pf)[which(diag.stopper2==1)],pch=19, ylab = 'Pf', xlab = 'Pft', main = 'Correlations') - abline(a=0,b=1,lty=2) + graphics::abline(a=0,b=1,lty=2) - corrCI <- apply(dat[,iPf[which(diag.stopper2!=0)]],2,quantile,c(0.025,0.975)) + corrCI <- apply(dat[,iPf[which(diag.stopper2!=0)]],2,stats::quantile,c(0.025,0.975)) - par(mfrow=c(1,1)) + graphics::par(mfrow=c(1,1)) graphics::plot(PfT[which(diag.stopper2!=0)],Pf[which(diag.stopper2!=0)], ylim=range(corrCI),pch=19,xlab='Pf Ensemble (True)', ylab='Pf Estimated (tobit2space)', main='Non-Diagonal Covariance') - abline(a=0,b=1,lty=2) + graphics::abline(a=0,b=1,lty=2) for(i in 1:length(Pf)){ if(diag.stopper2[i]==1){ - lines(rep(as.vector(PfT)[i],2),PfCI[,i],col=i,lwd=2) + graphics::lines(rep(as.vector(PfT)[i],2),PfCI[,i],col=i,lwd=2) } } - par(mfrow=c(1,1)) + graphics::par(mfrow=c(1,1)) graphics::plot(diag(PfT)-diag(Pf),xlab='State Variable',pch=19, cex=2,main='Which variance changed the most?') diff --git 
a/modules/assim.sequential/R/build_X.R b/modules/assim.sequential/R/build_X.R index bc61166e797..1dd91638d8d 100644 --- a/modules/assim.sequential/R/build_X.R +++ b/modules/assim.sequential/R/build_X.R @@ -20,7 +20,7 @@ build_X <- function(out.configs, settings, new.params, nens, read_restart_times, outdir, t = 1, var.names, my.read_restart, restart_flag = FALSE){ if(t == 1 & restart_flag){ reads <- - furrr::future_pmap(list(out.configs %>% `class<-`(c("list")), settings, new.params),function(configs,settings,siteparams) { + furrr::future_pmap(list(out.configs %>% `class<-`(c("list")), settings, new.params),function(configs,my_settings,siteparams) { # Loading the model package - this is required bc of the furrr #library(paste0("PEcAn.",settings$model$type), character.only = TRUE) #source("~/pecan/models/sipnet/R/read_restart.SIPNET.R") @@ -31,9 +31,9 @@ build_X <- function(out.configs, settings, new.params, nens, read_restart_times, X_tmp[[i]] <- do.call( my.read_restart, args = list( outdir = outdir, - runid = settings$runs$id[i] %>% as.character(), + runid = my_settings$run$id[i] %>% as.character(), stop.time = read_restart_times[t+1], - settings = settings, + settings = my_settings, var.names = var.names, params = siteparams[[i]] ) @@ -45,7 +45,7 @@ build_X <- function(out.configs, settings, new.params, nens, read_restart_times, }else{ reads <- - furrr::future_pmap(list(out.configs %>% `class<-`(c("list")), settings, new.params),function(configs,settings,siteparams) { + furrr::future_pmap(list(out.configs %>% `class<-`(c("list")), settings, new.params),function(configs,my_settings,siteparams) { X_tmp <- vector("list", 2) @@ -55,7 +55,6 @@ build_X <- function(out.configs, settings, new.params, nens, read_restart_times, outdir = outdir, runid = configs$runs$id[i] %>% as.character(), stop.time = read_restart_times[t+1], - settings = settings, var.names = var.names, params = siteparams[[i]] ) diff --git a/modules/assim.sequential/R/downscale_function.R b/modules/assim.sequential/R/downscale_function.R new file mode 100644 index 00000000000..317926eba1e --- /dev/null +++ b/modules/assim.sequential/R/downscale_function.R @@ -0,0 +1,366 @@ +##' @title Preprocess Data for Downscaling +##' @name SDA_downscale_preprocess +##' @author Sambhav Dixit +##' +##' @param data_path Character. File path for .rds containing ensemble data. +##' @param coords_path Character. File path for .csv file containing the site coordinates, with columns named "lon" and "lat". +##' @param date Date. If SDA site run, format is yyyy/mm/dd; if NEON, yyyy-mm-dd. Restricted to years within the file supplied to 'data_path'. +##' @param carbon_pool Character. Carbon pool of interest. Name must match the carbon pool name found within the file supplied to 'data_path'. +##' @details This function ensures that the specified date and carbon pool are present in the input data. It also checks the validity of the site coordinates and aligns the number of rows between site coordinates and carbon data. +##' +##' @description This function reads and checks the input data, ensuring that the required date and carbon pool exist, and that the site coordinates are valid. +##' +##' @return A list containing the read .rds data, the cleaned site coordinates, and the preprocessed carbon data.
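Ahead of the definition that follows, a hedged usage sketch; the file paths, date, and pool name are placeholders, not files from this PR.

    # Hypothetical call; paths and names are illustrative only
    prep <- SDA_downscale_preprocess(
      data_path   = "SDA/downscale/ensemble_data.rds",
      coords_path = "SDA/downscale/site_coordinates.csv",
      date        = "2020-07-15",
      carbon_pool = "AbvGrndWood")
    str(prep$carbon_data)  # one "ensembleN" column per ensemble member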
+ +SDA_downscale_preprocess <- function(data_path, coords_path, date, carbon_pool) { + # Read the input data and site coordinates + input_data <- readRDS(data_path) + site_coordinates <- readr::read_csv(coords_path) + + # Convert input_data names to Date objects + input_date_names <- lubridate::ymd(names(input_data)) + names(input_data) <- input_date_names + + # Convert the input date to a Date object + standard_date <- lubridate::ymd(date) + + # Ensure the date exists in the input data + if (!standard_date %in% input_date_names) { + stop(paste("Date", date, "not found in the input data.")) + } + + # Extract the carbon data for the specified focus year + index <- which(input_date_names == standard_date) + data <- input_data[[index]] + + # Ensure the carbon pool exists in the input data + if (!carbon_pool %in% names(data)) { + stop(paste("Carbon pool", carbon_pool, "not found in the input data.")) + } + + carbon_data <- as.data.frame(t(data[which(names(data) == carbon_pool)])) + names(carbon_data) <- paste0("ensemble", seq(ncol(carbon_data))) + + # Ensure site coordinates have 'lon' and 'lat' columns + if (!all(c("lon", "lat") %in% names(site_coordinates))) { + stop("Site coordinates must contain 'lon' and 'lat' columns.") + } + + # Ensure the number of rows in site coordinates matches the number of rows in carbon data + if (nrow(site_coordinates) != nrow(carbon_data)) { + message("Number of rows in site coordinates does not match the number of rows in carbon data.") + if (nrow(site_coordinates) > nrow(carbon_data)) { + message("Truncating site coordinates to match carbon data rows.") + site_coordinates <- site_coordinates[1:nrow(carbon_data), ] + } else { + message("Truncating carbon data to match site coordinates rows.") + carbon_data <- carbon_data[1:nrow(site_coordinates), ] + } + } + + message("Preprocessing completed successfully.") + return(list(input_data = input_data, site_coordinates = site_coordinates, carbon_data = carbon_data)) +} + +##' @noRd +##' +##' @title Create folds function +##' @name .create_folds +##' @author Sambhav Dixit +##' +##' @param y Vector. A vector of outcome data or indices. +##' @param k Numeric. The number of folds to create. +##' @param list Logical. If TRUE, returns a list of fold indices. If FALSE, returns a vector. +##' @param returnTrain Logical. If TRUE, returns indices for training sets. If FALSE, returns indices for test sets. +##' @details This function creates k-fold indices for cross-validation. It can return either training or test set indices, and the output can be in list or vector format. +##' +##' @description This function generates k-fold indices for cross-validation, allowing for flexible output formats. +##' +##' @return A list of k elements (if list = TRUE), each containing indices for a fold, or a vector of indices (if list = FALSE). + +.create_folds <- function(y, k, list = TRUE, returnTrain = FALSE) { + n <- length(y) + indices <- seq_len(n) + folds <- split(indices, cut(seq_len(n), breaks = k, labels = FALSE)) + + if (!returnTrain) { + folds <- folds # Test indices are already what we want + } else { + folds <- lapply(folds, function(x) indices[-x]) # Return training indices + } + + if (!list) { + folds <- unlist(folds) + } + + return(folds) +} + +##' @title SDA Downscale Function +##' @name SDA_downscale +##' @author Joshua Ploshay, Sambhav Dixit +##' +##' @param preprocessed List. Preprocessed data returned as an output from the SDA_downscale_preprocess function. +##' @param date Date. 
If SDA site run, format is yyyy/mm/dd; if NEON, yyyy-mm-dd. Restricted to years within file supplied to 'preprocessed' from the 'data_path'. +##' @param carbon_pool Character. Carbon pool of interest. Name must match carbon pool name found within file supplied to 'preprocessed' from the 'data_path'. +##' @param covariates SpatRaster stack. Used as predictors in downscaling. Layers within stack should be named. Recommended that this stack be generated using 'covariates' instructions in assim.sequential/inst folder +##' @param model_type Character. Either "rf" for Random Forest or "cnn" for Convolutional Neural Network. Default is Random Forest. +##' @param seed Numeric or NULL. Optional seed for random number generation. Default is NULL. +##' @details This function will downscale forecast data to unmodeled locations using covariates and site locations +##' +##' @description This function uses either Random Forest or Convolutional Neural Network model based on the model_type parameter. +##' +##' @return A list containing the training and testing data sets, models, predicted maps for each ensemble member, and predictions for testing data. + +SDA_downscale <- function(preprocessed, date, carbon_pool, covariates, model_type = "rf", seed = NULL) { + carbon_data <- preprocessed$carbon_data + + # Convert site coordinates to SpatVector + site_coordinates <- terra::vect(preprocessed$site_coordinates, geom = c("lon", "lat"), crs = "EPSG:4326") + + # Extract predictors from covariates raster using site coordinates + predictors <- as.data.frame(terra::extract(covariates, site_coordinates, ID = FALSE)) + + # Dynamically get covariate names + covariate_names <- names(predictors) + + # Create a single data frame with all predictors and ensemble data + full_data <- cbind(carbon_data, predictors) + + # Split the observations into training and testing sets + if (!is.null(seed)) { + set.seed(seed) # Only set seed if provided + } + sample <- sample(1:nrow(full_data), size = round(0.75 * nrow(full_data))) + train_data <- full_data[sample, ] + test_data <- full_data[-sample, ] + + # Prepare data for both RF and CNN + x_data <- as.matrix(full_data[, covariate_names]) + y_data <- as.matrix(carbon_data) + + # Calculate scaling parameters from all data + scaling_params <- list( + mean = colMeans(x_data), + sd = apply(x_data, 2, stats::sd) + ) + + # Normalize the data + x_data_scaled <- scale(x_data, center = scaling_params$mean, scale = scaling_params$sd) + + # Split into training and testing sets + x_train <- x_data_scaled[sample, ] + x_test <- x_data_scaled[-sample, ] + y_train <- y_data[sample, ] + y_test <- y_data[-sample, ] + + # Initialize lists for outputs + models <- list() + maps <- list() + predictions <- list() + + if (model_type == "rf") { + for (i in seq_along(carbon_data)) { + ensemble_col <- paste0("ensemble", i) + formula <- stats::as.formula(paste(ensemble_col, "~", paste(covariate_names, collapse = " + "))) + models[[i]] <- randomForest::randomForest(formula, + data = train_data, + ntree = 1000, + na.action = stats::na.omit, + keep.forest = TRUE, + importance = TRUE) + + maps[[i]] <- terra::predict(covariates, model = models[[i]], na.rm = TRUE) + predictions[[i]] <- stats::predict(models[[i]], test_data) + } + } else if (model_type == "cnn") { + # Define k_folds and num_bags + k_folds <- 5 + num_bags <- 5 + + # Reshape input data for CNN + x_train <- keras3::array_reshape(x_train, c(nrow(x_train), 1, ncol(x_train))) + x_test <- keras3::array_reshape(x_test, c(nrow(x_test), 1, ncol(x_test))) + + for 
(i in seq_along(carbon_data)) { + all_models <- list() + + # Create k-fold indices + fold_indices <- .create_folds(y = seq_len(nrow(x_train)), k = k_folds, list = TRUE, returnTrain = FALSE) + + #initialise operations for each fold + for (fold in 1:k_folds) { + cat(sprintf("Processing ensemble %d, fold %d of %d\n", i, fold, k_folds)) + + # Split data into training and validation sets for this fold + train_indices <- setdiff(seq_len(nrow(x_train)), fold_indices[[fold]]) + val_indices <- fold_indices[[fold]] + + x_train_fold <- x_train[train_indices, , drop = FALSE] + y_train_fold <- y_train[train_indices, i] + x_val_fold <- x_train[val_indices, , drop = FALSE] + y_val_fold <- y_train[val_indices, i] + + # Create bagged models for this fold + fold_models <- list() + for (bag in 1:num_bags) { + # Create bootstrap sample + bootstrap_indices <- sample(1:nrow(x_train_fold), size = nrow(x_train_fold), replace = TRUE) + x_train_bag <- x_train_fold[bootstrap_indices, ] + y_train_bag <- y_train_fold[bootstrap_indices] + + # Define the CNN model architecture + # Uses two rounds of batch normalization and dropout for additional regularization + model <- keras3::keras_model_sequential() |> + # Layer Reshape : Reshape to fit target shape for the convolutional layer + keras3::layer_reshape(target_shape = c(ncol(x_train), 1, 1), input_shape = ncol(x_train)) |> + # 1D Convolutional layer: Extracts local features from input data + keras3::layer_conv_2d( + filters = 32, + kernel_size = c(3, 1), + activation = 'relu', + padding = 'same' + ) |> + # Flatten: Converts 3D output to 1D for dense layer input + keras3::layer_flatten() |> + # Dense layer: Learns complex combinations of features + keras3::layer_dense( + units = 64, + activation = 'relu', + kernel_regularizer = keras3::regularizer_l2(0.01) + ) |> + # Batch normalization: Normalizes layer inputs, stabilizes learning, reduces internal covariate shift + keras3::layer_batch_normalization() |> + # Dropout: Randomly sets some of inputs to 0, reducing overfitting and improving generalization + keras3::layer_dropout(rate = 0.3) |> + # Dense layer: Learns complex combinations of features + keras3::layer_dense( + units = 32, + activation = 'relu', + kernel_regularizer = keras3::regularizer_l2(0.01) + ) |> + # Batch normalization: Further stabilizes learning in deeper layers + keras3::layer_batch_normalization() |> + # Dropout: Additional regularization to prevent overfitting in final layer + keras3::layer_dropout(rate = 0.3) |> + # Output layer: Single neuron for regression prediction + keras3::layer_dense( + units = 1, + kernel_regularizer = keras3::regularizer_l2(0.01) + ) + + # Learning rate scheduler + lr_schedule <- keras3::learning_rate_schedule_exponential_decay( + initial_learning_rate = 0.001, + decay_steps = 1000, + decay_rate = 0.9 + ) + + # Early stopping callback + early_stopping <- keras3::callback_early_stopping( + monitor = 'loss', + patience = 10, + restore_best_weights = TRUE + ) + + # Compile the model + model |> keras3::compile( + loss = 'mean_squared_error', + optimizer = keras3::optimizer_adam(learning_rate = lr_schedule), + metrics = c('mean_absolute_error') + ) + + # Train the model + model |> keras3::fit( + x = x_train_bag, + y = y_train_bag, + epochs = 500, + batch_size = 32, + callbacks = list(early_stopping), + verbose = 0 + ) + + # Store the trained model for this bag in the fold_models list + fold_models[[bag]] <- model + } + + # Add fold models to all_models list + all_models <- c(all_models, fold_models) + } + + # Store all
models for this ensemble + models[[i]] <- all_models + + # Use all models for predictions + cnn_ensemble_predict <- function(models, newdata, scaling_params) { + newdata <- scale(newdata, center = scaling_params$mean, scale = scaling_params$sd) + predictions <- sapply(models, function(m) stats::predict(m, newdata)) + return(rowMeans(predictions)) + } + + # Create a prediction raster from covariates + prediction_rast <- terra::rast(covariates) + + # Generate spatial predictions using the trained model + maps[[i]] <- terra::predict(prediction_rast, model = models[[i]], + fun = cnn_ensemble_predict, + scaling_params = scaling_params) + + # Make predictions on held-out test data + predictions[[i]] <- cnn_ensemble_predict(models[[i]], x_data[-sample, ], scaling_params) + + } + } else { + stop("Invalid model_type. Please choose either 'rf' for Random Forest or 'cnn' for Convolutional Neural Network.") + } + + # Organize the results into a single output list + downscale_output <- list( + data = list(training = train_data, testing = test_data), + models = models, + maps = maps, + predictions = predictions, + scaling_params = scaling_params + ) + + # Rename each element of the output list with appropriate ensemble numbers + for (i in seq_along(carbon_data)) { + names(downscale_output$models)[i] <- paste0("ensemble", i) + names(downscale_output$maps)[i] <- paste0("ensemble", i) + names(downscale_output$predictions)[i] <- paste0("ensemble", i) + } + + return(downscale_output) +} + +##' @title Calculate Metrics for Downscaling Results +##' @name SDA_downscale_metrics +##' @author Sambhav Dixit +##' +##' @param downscale_output List. Output from the SDA_downscale function, containing data, models, maps, and predictions for each ensemble. +##' @param carbon_pool Character. Name of the carbon pool used in the downscaling process. +##' +##' @details This function calculates performance metrics for the downscaling results. It computes Mean Squared Error (MSE), Mean Absolute Error (MAE), and R-squared for each ensemble. The function uses the actual values from the testing data and the predictions generated during the downscaling process. +##' +##' @description This function takes the output from the SDA_downscale function and computes various performance metrics for each ensemble. It provides a way to evaluate the accuracy of the downscaling results without modifying the main downscaling function. 
+##' +##' @return A list of metrics for each ensemble, where each element contains the MAE, MSE, R_squared, the actual values from the testing data, and the predicted values for the testing data. +SDA_downscale_metrics <- function(downscale_output, carbon_pool) { + metrics <- list() + + for (i in seq_along(downscale_output$predictions)) { + # testing columns are named "ensembleN" (see SDA_downscale above) + actual <- downscale_output$data$testing[[paste0("ensemble", i)]] + predicted <- downscale_output$predictions[[i]] + + mse <- mean((actual - predicted)^2) + mae <- mean(abs(actual - predicted)) + r_squared <- 1 - sum((actual - predicted)^2) / sum((actual - mean(actual))^2) + + metrics[[i]] <- list(MSE = mse, MAE = mae, R_squared = r_squared, actual = actual, predicted = predicted) + } + + names(metrics) <- paste0("ensemble", seq_along(metrics)) + + return(metrics) +} diff --git a/modules/assim.sequential/R/downscale_function_hrly.R b/modules/assim.sequential/R/downscale_function_hrly.R new file mode 100644 index 00000000000..25da4c62150 --- /dev/null +++ b/modules/assim.sequential/R/downscale_function_hrly.R @@ -0,0 +1,103 @@ +#' SDA Downscale Function for Hourly Data +#' +#' This function uses the randomForest model to downscale forecast data (hourly) to unmodeled locations using covariates and site locations. +#' +#' @author Harunobu Ishii +#' @param nc_file In quotes, file path for .nc containing ensemble data. +#' @param coords In quotes, file path for .csv file containing the site coordinates, columns named "lon" and "lat". +#' @param yyyy In string, the year of interest in yyyy format. +#' @param covariates SpatRaster stack, used as predictors in randomForest. Layers within stack should be named. Recommended that this stack be generated using 'covariates' instructions in assim.sequential/inst folder +#' @return It returns the `downscale_output` list containing lists for the training and testing data sets, models, and predicted maps for each ensemble member.
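Returning to the downscaling workflow above, a hedged sketch of evaluating a finished run; `downscale_output` stands for the object returned by SDA_downscale(), and the pool name is a placeholder.

    # Hypothetical: score each ensemble member of a finished downscaling run
    metrics <- SDA_downscale_metrics(downscale_output, carbon_pool = "AbvGrndWood")
    sapply(metrics, `[[`, "R_squared")  # R-squared per ensemble member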
+#' @export + +SDA_downscale_hrly <- function(nc_file, coords, yyyy, covariates){ + + # Read the input data and site coordinates + nc_data <- ncdf4::nc_open(nc_file) + on.exit(ncdf4::nc_close(nc_data)) + input_data <- ncdf4::ncvar_get(nc_data, "NEE") + covariate_names <- names(covariates) + + + # Extract time and units + time <- nc_data$dim$time$vals + time_units <- nc_data$dim$time$units + time_origin_str <- substr(time_units, 12, 31) + + # Check if timezone is specified in the time units string + if (grepl("UTC|GMT", time_units)) { + time_origin <- lubridate::ymd_hm(time_origin_str, tz = "UTC") + } else if (grepl("EST", time_units)) { + time_origin <- lubridate::ymd_hm(time_origin_str, tz = "EST") + } else { + time_origin <- lubridate::ymd_hm(time_origin_str, tz = "UTC") # Default to UTC if not specified + } + + # Timereadable + if (grepl("hours", time_units)) { + time_readable <- time_origin + lubridate::dhours(time) + } else if (grepl("seconds", time_units)) { + time_readable <- time_origin + lubridate::dseconds(time) + } else { + stop("Unsupported time units") + } + + # Extract predictors from covariates raster using site coordinates + site_coordinates <- terra::vect(readr::read_csv(coords), geom=c("lon", "lat"), crs="EPSG:4326") + predictors <- as.data.frame(terra::extract(covariates, site_coordinates,ID = FALSE)) + + downscale_output<- list() + + # Train & Test split + sample <- sample(1:nrow(predictors), size = round(0.75*nrow(predictors))) + + # Predict for each time stamp of the year selected + time_indices <- which(year(time_readable) == yyyy) + for (index in time_indices) { + data <- input_data[index, , ] + carbon_data <- as.data.frame(data) + names(carbon_data) <- paste0("ensemble",seq(1:ncol(carbon_data))) + + # Combine carbon data and covariates/predictors and split into training/test + full_data <- cbind(carbon_data, predictors) + train_data <- full_data[sample, ] + test_data <- full_data[-sample, ] + + # Combine each ensemble member with all predictors + models <- list() + maps <- list() + predictions <- list() + ensembles <- list() + for (i in seq_along(carbon_data)) { + ensemble_col <- paste0("ensemble", i) + formula <- stats::as.formula(paste(ensemble_col, "~", paste(covariate_names, collapse = " + "))) + models[[i]] <- randomForest::randomForest(formula, + data = train_data, + ntree = 1000, + na.action = stats::na.omit, + keep.forest = TRUE, + importance = TRUE) + + maps[[i]] <- terra::predict(covariates, model = models[[i]], na.rm = TRUE) + predictions[[i]] <- stats::predict(models[[i]], test_data) + } + + # Organize the results into a single output list + curr_downscaled <- list( data = list(training = train_data, testing = test_data), + models = models, + maps = maps, + predictions = predictions + ) + + # Rename each element of the output list with appropriate ensemble numbers + for (i in 1:length(curr_downscaled$data)) { + names(curr_downscaled$data[[i]]) <- paste0("ensemble", seq(1:ncol(carbon_data))) + } + names(curr_downscaled$models) <- paste0("ensemble", seq(1:ncol(carbon_data))) + names(curr_downscaled$maps) <- paste0("ensemble", seq(1:ncol(carbon_data))) + names(curr_downscaled$predictions) <- paste0("ensemble", seq(1:ncol(carbon_data))) + + downscale_output[[as.character(time_readable[index])]]<-curr_downscaled + } + return(downscale_output) +} diff --git a/modules/assim.sequential/R/get_ensemble_weights.R b/modules/assim.sequential/R/get_ensemble_weights.R index d5eabb5de27..8423e2eab86 100644 --- a/modules/assim.sequential/R/get_ensemble_weights.R +++ 
b/modules/assim.sequential/R/get_ensemble_weights.R @@ -23,7 +23,7 @@ get_ensemble_weights <- function(settings, time_do){ ###-------------------------------------------------------------------### ### Loading Weights ### ###-------------------------------------------------------------------### - weight_file <- read.csv(settings$run$inputs$ensembleweights$path) + weight_file <- utils::read.csv(settings$run$inputs$ensembleweights$path) start_date <- settings$run$inputs$ensembleweights$start.date end_date <- settings$run$inputs$ensembleweights$end.date years_get <- lubridate::year(start_date):lubridate::year(end_date) #assuming year time step... would need to change for other analyses possibly going down the load.data path? diff --git a/modules/assim.sequential/R/hop_test.R b/modules/assim.sequential/R/hop_test.R index 326f80e5af6..4c79b437e30 100644 --- a/modules/assim.sequential/R/hop_test.R +++ b/modules/assim.sequential/R/hop_test.R @@ -4,6 +4,7 @@ ##' ##' @param settings SDA PEcAn settings object ##' @param nyear number of years to run hop test over +##' @param ens.runid run id. If not provided, is looked up from [settings$outdir]/runs.txt ##' ##' @description Hop test. This script tests that the model successfully reads it's own restart and can restart without loss of information. ##' @@ -20,13 +21,19 @@ hop_test <- function(settings, ens.runid = NULL, nyear){ ##### Regular Run ##### if(is.null(ens.runid)){ - run.write.configs(settings, write = settings$database$bety$write) + PEcAn.workflow::run.write.configs(settings, write = settings$database$bety$write) PEcAn.workflow::start_model_runs(settings, settings$database$bety$write) - ens.runid <- read.table(file.path(settings$rundir,'runs.txt')) + ens.runid <- utils::read.table(file.path(settings$rundir,'runs.txt')) } - ens <- read.output(runid = ens.runid, + if (!requireNamespace("PEcAn.utils", quietly = TRUE)) { + PEcAn.logger::logger.error( + "Can't find package 'PEcAn.utils',", + "needed by `PEcAnAssimSequential::hop_test()`.", + "Please install it and try again.") + } + ens <- PEcAn.utils::read.output(runid = ens.runid, outdir = file.path(settings$outdir,'out', ens.runid), start.year = lubridate::year(settings$run$start.date), end.year = lubridate::year(settings$run$end.date), @@ -45,8 +52,8 @@ hop_test <- function(settings, ens.runid = NULL, nyear){ ##### PEcAnAssimSequential::sda.enkf(settings = settings, obs.mean = obs.mean, obs.cov = obs.cov) - hop.runid <- read.table(file.path(settings$rundir,'runs.txt')) - hop.ens <- read.output(runid = hop.runid, + hop.runid <- utils::read.table(file.path(settings$rundir,'runs.txt')) + hop.ens <- PEcAn.utils::read.output(runid = hop.runid, outdir = file.path(settings$outdir,'out', hop.runid), start.year = lubridate::year(settings$run$start.date), end.year = reg_run_end, @@ -61,8 +68,8 @@ hop_test <- function(settings, ens.runid = NULL, nyear){ plot_years <- lubridate::year(settings$run$start.date):reg_run_end - pdf('hop_test_results.pdf') - par(mfrow=c(2,1)) + grDevices::pdf('hop_test_results.pdf') + graphics::par(mfrow=c(2,1)) for(p in seq_along(hop_var)){ hop_var_use <- unlist(hop_var[p]) @@ -73,24 +80,24 @@ hop_test <- function(settings, ens.runid = NULL, nyear){ ens[[hop_var_use]], pch=19,ylim=c(range(ens,hop.ens)), ylab = hop_var_use, xlab = 'Years') - points(plot_years, + graphics::points(plot_years, hop.ens[[hop_var_use]],col='red') - abline(v=year(settings$run$end.date),col='blue',lwd=2) - legend('topleft', c('Regular Run','Hop Run','Test Start'), 
pch=c(19,1,19),col=c('black','red','blue')) - title(paste('Hop Test Comparision',hop_var[p])) + graphics::abline(v=year(settings$run$end.date),col='blue',lwd=2) + graphics::legend('topleft', c('Regular Run','Hop Run','Test Start'), pch=c(19,1,19),col=c('black','red','blue')) + graphics::title(paste('Hop Test Comparision',hop_var[p])) - hop_cor <- cor(ens.plot,hop.ens.plot) + hop_cor <- stats::cor(ens.plot,hop.ens.plot) plot(ens.plot,hop.ens.plot, xlab = paste('Regular Run',hop_var_use), ylab = paste('Hop Run',hop_var_use),pch=19,cex=1.5) - abline(a=0,b=1,col='red',lwd=2) - legend('topleft',paste('Correlation =',hop_cor)) + graphics::abline(a=0,b=1,col='red',lwd=2) + graphics::legend('topleft',paste('Correlation =',hop_cor)) - title(paste('Hop Test Correlation',hop_var[p])) + graphics::title(paste('Hop Test Correlation',hop_var[p])) } - dev.off() + grDevices::dev.off() } diff --git a/modules/assim.sequential/R/load_data_paleon_sda.R b/modules/assim.sequential/R/load_data_paleon_sda.R index 42a85b4e1fa..a77596796ed 100644 --- a/modules/assim.sequential/R/load_data_paleon_sda.R +++ b/modules/assim.sequential/R/load_data_paleon_sda.R @@ -68,7 +68,7 @@ load_data_paleon_sda <- function(settings){ biomass2carbon <- 0.48 for(i in seq_along(format_id)){ - input.list[[i]] <- db.query(paste("SELECT * FROM inputs WHERE site_id =",site$id ," AND format_id = ",format_id[[i]]), con) + input.list[[i]] <- PEcAn.DB::db.query(paste("SELECT * FROM inputs WHERE site_id =",site$id ," AND format_id = ",format_id[[i]]), con) input.id[[i]] <- input.list[[i]]$id data.path <- PEcAn.DB::query.file.path(input.id[[i]], settings$host$name, con) @@ -118,8 +118,15 @@ load_data_paleon_sda <- function(settings){ ### Map species to model specific PFTs if(any(var.names == 'AGB.pft')){ - spp_id <- match_species_id(unique(dataset$species_id),format_name = 'usda', con) - pft_mat <- match_pft(spp_id$bety_species_id, settings$pfts, + # this is the only code path that uses data.land, so we check now instead of at top of function + if (!requireNamespace("PEcAn.data.land", quietly = TRUE)) { + PEcAn.logger::logger.error( + "Can't find package 'PEcAn.data.land',", + "needed by `PEcAnAssimSequential::load_data_paleon_sda()`.", + "Please install it and try again.") + } + spp_id <- PEcAn.data.land::match_species_id(unique(dataset$species_id),format_name = 'usda', con) + pft_mat <- PEcAn.data.land::match_pft(spp_id$bety_species_id, settings$pfts, con = con, allow_missing = TRUE) x <- paste0('AGB.pft.', pft_mat$pft) @@ -150,7 +157,7 @@ load_data_paleon_sda <- function(settings){ mean_mat <- reshape2::dcast(melt.next, arguments2, mean) iter_mat <- reshape2::acast(melt.next, arguments3, mean) - cov.test <- apply(iter_mat,3,function(x){cov(x)}) + cov.test <- apply(iter_mat,3,function(x){stats::cov(x)}) for(t in seq_along(obs.times)){ obs.mean.tmp[[t]] <- mean_mat[mean_mat[,time.type]==obs.times[t], -c(1)] #THIS WONT WORK IF TIMESTEP ISNT ANNUAL @@ -184,20 +191,20 @@ load_data_paleon_sda <- function(settings){ sp::proj4string(coords) <- sp::CRS('+proj=longlat +ellps=WGS84') ### site utm coordinates - utm <- sp::spTransform(coords, CRS("+proj=utm +zone=18N ellps=WGS84")) + utm <- sp::spTransform(coords, sp::CRS("+proj=utm +zone=18N ellps=WGS84")) utm <- as.matrix(data.frame(utm)) ### find grid cell - site.x <- which(min(abs(ncvar_get(ncin, 'x') - utm[1])) == abs(ncvar_get(ncin, 'x') - utm[1])) - site.y <- which(min(abs(ncvar_get(ncin, 'y') - utm[2])) == abs(ncvar_get(ncin, 'y') - utm[2])) - years <- formatC(ncvar_get(ncin, 'year'), width = 4, 
format = "d", flag = "0") + site.x <- which(min(abs(ncdf4::ncvar_get(ncin, 'x') - utm[1])) == abs(ncdf4::ncvar_get(ncin, 'x') - utm[1])) + site.y <- which(min(abs(ncdf4::ncvar_get(ncin, 'y') - utm[2])) == abs(ncdf4::ncvar_get(ncin, 'y') - utm[2])) + years <- formatC(ncdf4::ncvar_get(ncin, 'year'), width = 4, format = "d", flag = "0") taxa <- names(ncin$var) if('other'%in%taxa) taxa <- taxa[-c(grep('other',taxa))] sims.keep <- array(NA,dim=c(length(taxa),length(ncin$dim$year$vals),length(ncin$dim$sample$vals))) for(n in seq_along(taxa)){ - taxa.start <- ncvar_get(ncin, taxa[n]) + taxa.start <- ncdf4::ncvar_get(ncin, taxa[n]) # input is a matrix 'sims', with rows as time and columns as MCMC samples sims.keep[n,,] <- taxa.start[site.x,site.y,,] @@ -216,7 +223,7 @@ load_data_paleon_sda <- function(settings){ for(lag in 1:(ntimes-1)){ covars <- c(covars, rowMeans(row.means.sims[(lag+1):ntimes, , drop = FALSE] * row.means.sims[1:(ntimes-lag), , drop = FALSE])) } - vars <- apply(row.means.sims, 1, var) # pointwise post variances at each time, might not be homoscedastic + vars <- apply(row.means.sims, 1, stats::var) # pointwise post variances at each time, might not be homoscedastic # nominal sample size scaled by ratio of variance of an average # under independence to variance of average of correlated values @@ -228,7 +235,7 @@ load_data_paleon_sda <- function(settings){ pecan.pfts <- as.character(lapply(settings$pfts, function(x) x[["name"]])) for(n in taxa){ - sims.start <- ncvar_get(ncin,n) + sims.start <- ncdf4::ncvar_get(ncin,n) # input is a matrix 'sims', with rows as time and columns as MCMC samples sims <- sims.start[site.x,site.y,,] @@ -265,7 +272,7 @@ load_data_paleon_sda <- function(settings){ rownames(sims.keep) <- colnames(mean.mat) obs.cov <- list() for(n in 1:length(ncin$dim$year$vals)){ - obs.cov[[n]] <- cov(t(sims.keep[,n,])) #* var.inf + obs.cov[[n]] <- stats::cov(t(sims.keep[,n,])) #* var.inf } names(obs.cov) <- paste0(years,'/12/31') @@ -274,7 +281,7 @@ load_data_paleon_sda <- function(settings){ which.keep <- list() for(n in obs.times){ - min.vec <- na.omit(as.numeric(n) - year(as.Date(names(obs.mean)))) + min.vec <- stats::na.omit(as.numeric(n) - year(as.Date(names(obs.mean)))) which.keep[[n]] <- which(min(abs(min.vec))==abs(min.vec)) obs.mean.tmp[[n]] <- obs.mean[[which.keep[[n]][1]]] obs.cov.tmp[[n]] <- obs.cov[[which.keep[[n]][1]]] @@ -287,7 +294,7 @@ load_data_paleon_sda <- function(settings){ ### Error Message for no data product if(format_id[[i]] != '1000000040' & format_id[[i]] != '1000000058'){ - PEcAn.logger::logger.severe('ERROR: This data format has not been added to this function (ツ)_/¯ ') + PEcAn.logger::logger.severe('ERROR: This data format has not been added to this function :(') } } diff --git a/modules/assim.sequential/R/matrix_operation.R b/modules/assim.sequential/R/matrix_operation.R new file mode 100644 index 00000000000..c5177c21ece --- /dev/null +++ b/modules/assim.sequential/R/matrix_operation.R @@ -0,0 +1,77 @@ +##' @title GrabFillMatrix +##' @name GrabFillMatrix +##' @author Dongchen Zhang +##' +##' @param M source matrix that will be either subtracted or filled in. +##' @param ind vector of index that of where to be subtracted or filled in. +##' @param M1 additional matrix used to fill in the source matrix, the default it NULL. +##' @details This function helps subtract or fill in a matrix given the index. 
+##' +##' @export +GrabFillMatrix <- function (M, ind, M1 = NULL) { + if (is.null(M1)) { + #grab a sub-matrix + m <- matrix(NA, length(ind), length(ind)) + for (i in seq_along(ind)) { + for (j in seq_along(ind)) { + m[i, j] <- M[ind[i], ind[j]] + } + } + } else { + #fill into a larger matrix + m <- M + for (i in seq_along(ind)) { + for (j in seq_along(ind)) { + m[ind[i], ind[j]] <- M1[i, j] + } + } + } + m +} + +##' @title matrix_network +##' @name matrix_network +##' @author Dongchen Zhang +##' +##' @param mat a boolean matrix representing the interactions between any sites. +##' +##' @return It returns lists of index representing each network. +##' +##' @export +matrix_network <- function (mat) { + #initialize the final returned list. + vec_group <- vector("list", ncol(mat)) + #initialize the vector for sites that are completed. + sites.complete <- c() + for (i in 1:ncol(mat)) { + #if we already completed the ith site, go next. + if (i %in% sites.complete) { + next + } + #initialize the arguments for the while loop. + vec <- c() + stop <- FALSE + inits <- i + #while loop + while (!stop) { + Inits <- c() + for (init in inits) { + Inits <- c(Inits, which(mat[init,])) + } + Inits <- Inits[which(!Inits %in% vec)] + vec <- sort(unique(c(vec, Inits))) + #if we don't have any new site that belongs to this network. + if (length(Inits) == 0) { + #then stop. + stop <- !stop + } else { + #else we initialize a new round of searching by new sites. + inits <- Inits + } + } + sites.complete <- c(sites.complete, vec) + vec_group[[i]] <- sort(vec) + } + vec_group[sapply(vec_group, is.null)] <- NULL + return(vec_group) +} \ No newline at end of file diff --git a/modules/assim.sequential/R/sample.parameters.R b/modules/assim.sequential/R/sample.parameters.R index a6a0c01c3da..4f745ba4ca1 100644 --- a/modules/assim.sequential/R/sample.parameters.R +++ b/modules/assim.sequential/R/sample.parameters.R @@ -12,15 +12,15 @@ sample.parameters <- function(ne, settings, con) { ## grab posteriors from database if (is.null(settings$assim.sequential$prior)) { - pft.id <- db.query(paste0("SELECT id from pfts where name = '", settings$pfts$pft$name, "'"), + pft.id <- PEcAn.DB::db.query(paste0("SELECT id from pfts where name = '", settings$pfts$pft$name, "'"), con) - priors <- db.query(paste0("SELECT * from posteriors where pft_id = ", pft.id), con) + priors <- PEcAn.DB::db.query(paste0("SELECT * from posteriors where pft_id = ", pft.id), con) ## by default, use the most recent posterior as the prior settings$assim.sequential$prior <- priors$id[which.max(priors$updated_at)] } ## load prior - prior.db <- db.query(paste0("SELECT * from dbfiles where container_type = 'Posterior' and container_id = ", + prior.db <- PEcAn.DB::db.query(paste0("SELECT * from dbfiles where container_type = 'Posterior' and container_id = ", settings$assim.sequential$prior), con) prior.db <- prior.db[grep("post.distns.Rdata", prior.db$file_name), ] load(file.path(prior.db$file_path, "post.distns.Rdata")) diff --git a/modules/assim.sequential/R/sda.enkf.R b/modules/assim.sequential/R/sda.enkf.R index 5c55ec2d8d7..20f1674d034 100644 --- a/modules/assim.sequential/R/sda.enkf.R +++ b/modules/assim.sequential/R/sda.enkf.R @@ -1,5 +1,10 @@ -##' @title sda.enkf -##' @name sda.enkf +##' State Variable Data Assimilation: Ensemble Kalman Filter +##' +##' Restart mode: Basic idea is that during a restart (primary case envisioned as an iterative forecast), +##' a new workflow folder is created and the previous forecast for the start_time is copied over. 
+##' During restart the initial run before the loop is skipped, with the info being populated from the previous run. +##' The function then dives right into the first Analysis, then continues on like normal. +##' ##' @author Michael Dietze and Ann Raiho \email{dietze@@bu.edu} ##' ##' @param settings PEcAn settings object @@ -10,11 +15,6 @@ ##' @param adjustment flag for using ensemble adjustment filter or not ##' @param restart Used for iterative updating previous forecasts. This is a list that includes ens.inputs, the list of inputs by ensemble member, params, the parameters, and old_outdir, the output directory from the previous workflow. These three things are needed to ensure that if a new workflow is started that ensemble members keep there run-specific met and params. See Details ##' -##’ @details -##’ Restart mode: Basic idea is that during a restart (primary case envisioned as an iterative forecast), a new workflow folder is created and the previous forecast for the start_time is copied over. During restart the initial run before the loop is skipped, with the info being populated from the previous run. The function then dives right into the first Analysis, then continues on like normal. -##' -##' @description State Variable Data Assimilation: Ensemble Kalman Filter -##' ##' ##' @return NONE ##' @export @@ -27,7 +27,12 @@ sda.enkf.original <- function(settings, obs.mean, obs.cov, IC = NULL, Q = NULL, "needed by `PEcAnAssimSequential::sda.enkf.original()`.", "Please install it and try again.") } - + if (!requireNamespace("PEcAn.visualization", quietly = TRUE)) { + PEcAn.logger::logger.error( + "Can't find package 'PEcAn.visualization',", + "needed by `PEcAnAssimSequential::sda.enkf.original()`.", + "Please install it and try again.") + } ymd_hms <- lubridate::ymd_hms hms <- lubridate::hms second <- lubridate::second @@ -135,11 +140,11 @@ sda.enkf.original <- function(settings, obs.mean, obs.cov, IC = NULL, Q = NULL, ### open database connection ### ###-------------------------------------------------------------------### if (write) { - con <- try(db.open(settings$database$bety), silent = TRUE) + con <- try(PEcAn.DB::db.open(settings$database$bety), silent = TRUE) if (is(con, "try-error")) { con <- NULL } else { - on.exit(db.close(con), add = TRUE) + on.exit(PEcAn.DB::db.close(con), add = TRUE) } } else { con <- NULL @@ -152,7 +157,7 @@ sda.enkf.original <- function(settings, obs.mean, obs.cov, IC = NULL, Q = NULL, workflow.id <- settings$workflow$id } else { # workflow.id <- -1 - settings <- check.workflow.settings(settings,con) + settings <- PEcAn.settings::check.workflow.settings(settings,con) workflow.id <- settings$workflow$id PEcAn.logger::logger.info("new workflow ID - ",workflow.id) } @@ -162,7 +167,7 @@ sda.enkf.original <- function(settings, obs.mean, obs.cov, IC = NULL, Q = NULL, ###-------------------------------------------------------------------### if (!is.null(con)) { # write ensemble first - result <- db.query( + result <- PEcAn.DB::db.query( paste( "INSERT INTO ensembles (runtype, workflow_id) ", "values ('EnKF', ", workflow.id, ") returning id", @@ -202,7 +207,7 @@ sda.enkf.original <- function(settings, obs.mean, obs.cov, IC = NULL, Q = NULL, # cumulative_ensemble_samples <- numeric(0) # # repeat{ # temporary SIPNET hack, I want to make sure sum <1 for SIPNET - get.parameter.samples(settings, ens.sample.method = settings$ensemble$method) ## Aside: if method were set to unscented, would take minimal changes to do UnKF + 
PEcAn.uncertainty::get.parameter.samples(settings, ens.sample.method = settings$ensemble$method) ## Aside: if method were set to unscented, would take minimal changes to do UnKF load(file.path(settings$outdir, "samples.Rdata")) ## loads ensemble.samples # cumulative_ensemble_samples <- rbind(cumulative_ensemble_samples,ensemble.samples$temperate.deciduous_SDA) # tot_check <- apply(ensemble.samples$temperate.deciduous_SDA[,c(20, 25,27)],1,sum) < 1 @@ -242,7 +247,7 @@ sda.enkf.original <- function(settings, obs.mean, obs.cov, IC = NULL, Q = NULL, } old_runs <- list.dirs(file.path(old_outdir,"out"),recursive=FALSE) ## select the _last_ nens - old_runs <- tail(old_runs,nens) + old_runs <- utils::tail(old_runs,nens) } @@ -254,7 +259,7 @@ sda.enkf.original <- function(settings, obs.mean, obs.cov, IC = NULL, Q = NULL, ## set RUN.ID if (!is.null(con)) { paramlist <- paste("EnKF:", i) - run.id[[i]] <- db.query( + run.id[[i]] <- PEcAn.DB::db.query( paste0( "INSERT INTO runs (", "model_id, site_id, ", @@ -475,16 +480,16 @@ sda.enkf.original <- function(settings, obs.mean, obs.cov, IC = NULL, Q = NULL, }) t1 <- 1 - pink <- col2rgb("deeppink") - alphapink <- rgb(pink[1], pink[2], pink[3], 180, max = 255) - green <- col2rgb("green") - alphagreen <- rgb(green[1], green[2], green[3], 75, max = 255) - blue <- col2rgb("blue") - alphablue <- rgb(blue[1], blue[2], blue[3], 75, max = 255) - purple <- col2rgb("purple") - alphapurple <- rgb(purple[1], purple[2], purple[3], 75, max = 255) - brown <- col2rgb("brown") - alphabrown <- rgb(brown[1], brown[2], brown[3], 75, max = 255) + pink <- grDevices::col2rgb("deeppink") + alphapink <- grDevices::rgb(pink[1], pink[2], pink[3], 180, max = 255) + green <- grDevices::col2rgb("green") + alphagreen <- grDevices::rgb(green[1], green[2], green[3], 75, max = 255) + blue <- grDevices::col2rgb("blue") + alphablue <- grDevices::rgb(blue[1], blue[2], blue[3], 75, max = 255) + purple <- grDevices::col2rgb("purple") + alphapurple <- grDevices::rgb(purple[1], purple[2], purple[3], 75, max = 255) + brown <- grDevices::col2rgb("brown") + alphabrown <- grDevices::rgb(brown[1], brown[2], brown[3], 75, max = 255) # weight matrix wt.mat <- matrix(NA, nrow = nens, ncol = nt) @@ -539,7 +544,7 @@ for(t in seq_len(nt)) { # obs <- which(!is.na(obs.mean[[t]])) mu.f <- as.numeric(apply(X, 2, mean, na.rm = TRUE)) - Pf <- cov(X) + Pf <- stats::cov(X) pmiss <- which(diag(Pf) == 0) diag(Pf)[pmiss] <- 0.1 ## hack for zero variance @@ -557,7 +562,7 @@ for(t in seq_len(nt)) { # # function(x) x[2]))))) #matches y to model # - choose <- na.omit(charmatch(colnames(X),names(obs.mean[[t]]))) + choose <- stats::na.omit(charmatch(colnames(X),names(obs.mean[[t]]))) Y <- unlist(obs.mean[[t]][choose]) Y[is.na(Y)] <- 0 @@ -585,7 +590,7 @@ for(t in seq_len(nt)) { # return(tmp) })) - Ybar <- Ybar[, na.omit(pmatch(colnames(X), colnames(Ybar)))] + Ybar <- Ybar[, stats::na.omit(pmatch(colnames(X), colnames(Ybar)))] YCI <- t(as.matrix(sapply(obs.cov[t1:t], function(x) { if (is.null(x)) { return(rep(NA, length(names.y))) @@ -596,7 +601,7 @@ for(t in seq_len(nt)) { # for (i in sample(x = 1:ncol(X), size = 2)) { t1 <- 1 Xbar <- plyr::laply(FORECAST[t1:t], function(x) { mean(x[, i], na.rm = TRUE) }) - Xci <- plyr::laply(FORECAST[t1:t], function(x) { quantile(x[, i], c(0.025, 0.975)) }) + Xci <- plyr::laply(FORECAST[t1:t], function(x) { stats::quantile(x[, i], c(0.025, 0.975)) }) plot(as.Date(obs.times[t1:t]), Xbar, @@ -608,17 +613,17 @@ for(t in seq_len(nt)) { # # observation / data if (i <= ncol(Ybar)) { - 
ciEnvelope(as.Date(obs.times[t1:t]), + PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), as.numeric(Ybar[, i]) - as.numeric(YCI[, i]) * 1.96, as.numeric(Ybar[, i]) + as.numeric(YCI[, i]) * 1.96, col = alphagreen) - lines(as.Date(obs.times[t1:t]), as.numeric(Ybar[, i]), type = "l", + graphics::lines(as.Date(obs.times[t1:t]), as.numeric(Ybar[, i]), type = "l", col = "darkgreen", lwd = 2) } # forecast - ciEnvelope(as.Date(obs.times[t1:t]), Xci[, 1], Xci[, 2], col = alphablue) # col='lightblue') - lines(as.Date(obs.times[t1:t]), Xbar, col = "darkblue", type = "l", lwd = 2) + PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), Xci[, 1], Xci[, 2], col = alphablue) # col='lightblue') + graphics::lines(as.Date(obs.times[t1:t]), Xbar, col = "darkblue", type = "l", lwd = 2) } } @@ -741,10 +746,10 @@ for(t in seq_len(nt)) { # set.seed(0) dat.tobit2space <- runMCMC(Cmcmc_tobit2space, niter = 50000, progressBar=TRUE) - pdf(file.path(outdir,paste0('assessParams',t,'.pdf'))) + grDevices::pdf(file.path(outdir,paste0('assessParams',t,'.pdf'))) assessParams(dat = dat.tobit2space[1000:5000,], Xt = X) - dev.off() + grDevices::dev.off() ## update parameters dat.tobit2space <- dat.tobit2space[1000:5000, ] @@ -759,7 +764,10 @@ for(t in seq_len(nt)) { # X.new <- matrix(colMeans(dat.tobit2space[,iycens]),nrow(X),ncol(X)) #Pf <- cov(X.new) - if(sum(diag(Pf)-diag(cov(X.new))) > 3 | sum(diag(Pf)-diag(cov(X.new))) < -3) logger.warn('Covariance in tobit2space model estimate is too different from original forecast covariance. Consider increasing your number of ensemble members.') + if (sum(diag(Pf) - diag(stats::cov(X.new))) > 3 + || sum(diag(Pf) - diag(stats::cov(X.new))) < -3) { + PEcAn.logger::logger.warn('Covariance in tobit2space model estimate is too different from original forecast covariance. 
Consider increasing your number of ensemble members.') + } ###-------------------------------------------------------------------### ### Generalized Ensemble Filter ### @@ -804,7 +812,7 @@ for(t in seq_len(nt)) { # y.censored = y.censored, r = solve(R)) inits.pred = list(q = diag(length(mu.f)), X.mod = as.vector(mu.f), - X = rnorm(length(mu.f),0,1)) # + X = stats::rnorm(length(mu.f),0,1)) # model_pred <- nimble::nimbleModel(tobit.model, data = data.tobit, dimensions = dimensions.tobit, constants = constants.tobit, inits = inits.pred, @@ -851,7 +859,7 @@ for(t in seq_len(nt)) { # Cmodel$r <- solve(R) inits.pred = list(q = diag(length(mu.f)), X.mod = as.vector(mu.f), - X = rnorm(ncol(X),0,1)) # + X = stats::rnorm(ncol(X),0,1)) # Cmodel$setInits(inits.pred) for(i in 1:length(y.ind)) { @@ -874,8 +882,8 @@ for(t in seq_len(nt)) { # Pa <- cov(dat[, iX]) Pa[is.na(Pa)] <- 0 - CI.X1[, t] <- quantile(dat[, iX[1]], c(0.025, 0.5, 0.975)) - CI.X2[, t] <- quantile(dat[, iX[2]], c(0.025, 0.5, 0.975)) + CI.X1[, t] <- stats::quantile(dat[, iX[1]], c(0.025, 0.5, 0.975)) + CI.X2[, t] <- stats::quantile(dat[, iX[2]], c(0.025, 0.5, 0.975)) mq <- dat[, iq] # Omega, Precision q.bar <- matrix(apply(mq, 2, mean), length(mu.f), length(mu.f)) # Mean Omega, Precision @@ -966,8 +974,14 @@ for(t in seq_len(nt)) { # # wt.mat[i,t]<-dmnorm_chol(FORECAST[[t]][i,], mu.a, solve(Pa), log = TRUE) # } - if(sum(mu.a - colMeans(X_a)) > 1 | sum(mu.a - colMeans(X_a)) < -1) logger.warn('Problem with ensemble adjustment (1)') - if(sum(diag(Pa) - diag(cov(X_a))) > 5 | sum(diag(Pa) - diag(cov(X_a))) < -5) logger.warn('Problem with ensemble adjustment (2)') + if (sum(mu.a - colMeans(X_a)) > 1 + || sum(mu.a - colMeans(X_a)) < -1) { + PEcAn.logger::logger.warn('Problem with ensemble adjustment (1)') + } + if (sum(diag(Pa) - diag(cov(X_a))) > 5 + || sum(diag(Pa) - diag(cov(X_a))) < -5) { + PEcAn.logger::logger.warn('Problem with ensemble adjustment (2)') + } analysis <- as.data.frame(X_a) }else{ @@ -975,7 +989,7 @@ for(t in seq_len(nt)) { # if(length(is.na(Pa)) == length(Pa)){ analysis <- mu.a }else{ - analysis <- as.data.frame(rmvnorm(as.numeric(nrow(X)), mu.a, Pa, method = "svd")) + analysis <- as.data.frame(mvtnorm::rmvnorm(as.numeric(nrow(X)), mu.a, Pa, method = "svd")) } @@ -1014,7 +1028,7 @@ for(t in seq_len(nt)) { # })) if(any(obs)){ - Y.order <- na.omit(pmatch(colnames(X), colnames(Ybar))) + Y.order <- stats::na.omit(pmatch(colnames(X), colnames(Ybar))) Ybar <- Ybar[,Y.order] Ybar[is.na(Ybar)] <- 0 YCI <- t(as.matrix(sapply(obs.cov[t1:t], function(x) { @@ -1031,14 +1045,14 @@ for(t in seq_len(nt)) { # YCI <- matrix(NA,nrow=length(t1:t), ncol=max(length(names.y),1)) } - par(mfrow = c(2, 1)) + graphics::par(mfrow = c(2, 1)) for (i in 1:ncol(FORECAST[[t]])) { # Xbar <- plyr::laply(FORECAST[t1:t], function(x) { mean(x[, i], na.rm = TRUE) }) - Xci <- plyr::laply(FORECAST[t1:t], function(x) { quantile(x[, i], c(0.025, 0.975), na.rm = TRUE) }) + Xci <- plyr::laply(FORECAST[t1:t], function(x) { stats::quantile(x[, i], c(0.025, 0.975), na.rm = TRUE) }) Xa <- plyr::laply(ANALYSIS[t1:t], function(x) { mean(x[, i], na.rm = TRUE) }) - XaCI <- plyr::laply(ANALYSIS[t1:t], function(x) { quantile(x[, i], c(0.025, 0.975), na.rm = TRUE) }) + XaCI <- plyr::laply(ANALYSIS[t1:t], function(x) { stats::quantile(x[, i], c(0.025, 0.975), na.rm = TRUE) }) ylab.names <- unlist(sapply(settings$state.data.assimilation$state.variable, function(x) { x })[2, ], use.names = FALSE) @@ -1052,11 +1066,11 @@ for(t in seq_len(nt)) { # xlab = "Year", ylab = 
ylab.names[grep(colnames(X)[i], var.names)], main = colnames(X)[i]) - ciEnvelope(as.Date(obs.times[t1:t]), + PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), as.numeric(Ybar[, i]) - as.numeric(YCI[, i]) * 1.96, as.numeric(Ybar[, i]) + as.numeric(YCI[, i]) * 1.96, col = alphagreen) - lines(as.Date(obs.times[t1:t]), + graphics::lines(as.Date(obs.times[t1:t]), as.numeric(Ybar[, i]), type = "l", col = "darkgreen", @@ -1072,12 +1086,12 @@ for(t in seq_len(nt)) { # } # forecast - ciEnvelope(as.Date(obs.times[t1:t]), Xci[, 1], Xci[, 2], col = alphablue) #col='lightblue') - lines(as.Date(obs.times[t1:t]), Xbar, col = "darkblue", type = "l", lwd = 2) + PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), Xci[, 1], Xci[, 2], col = alphablue) #col='lightblue') + graphics::lines(as.Date(obs.times[t1:t]), Xbar, col = "darkblue", type = "l", lwd = 2) # analysis - ciEnvelope(as.Date(obs.times[t1:t]), XaCI[, 1], XaCI[, 2], col = alphapink) - lines(as.Date(obs.times[t1:t]), Xa, col = "black", lty = 2, lwd = 2) + PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), XaCI[, 1], XaCI[, 2], col = alphapink) + graphics::lines(as.Date(obs.times[t1:t]), Xa, col = "black", lty = 2, lwd = 2) #legend('topright',c('Forecast','Data','Analysis'),col=c(alphablue,alphagreen,alphapink),lty=1,lwd=5) } } @@ -1165,7 +1179,7 @@ for(t in seq_len(nt)) { # ###-------------------------------------------------------------------### if(nens > 1){ - pdf(file.path(settings$outdir, "sda.enkf.time-series.pdf")) + grDevices::pdf(file.path(settings$outdir, "sda.enkf.time-series.pdf")) names.y <- unique(unlist(lapply(obs.mean[t1:t], function(x) { names(x) }))) Ybar <- t(sapply(obs.mean[t1:t], function(x) { @@ -1175,7 +1189,7 @@ for(t in seq_len(nt)) { # tmp[mch] <- x[mch] tmp })) - Y.order <- na.omit(pmatch(colnames(FORECAST[[t]]), colnames(Ybar))) + Y.order <- stats::na.omit(pmatch(colnames(FORECAST[[t]]), colnames(Ybar))) Ybar <- Ybar[,Y.order] YCI <- t(as.matrix(sapply(obs.cov[t1:t], function(x) { if (is.null(x)) { @@ -1195,7 +1209,7 @@ for(t in seq_len(nt)) { # Xbar <- plyr::laply(FORECAST[t1:t], function(x) { mean(x[, i], na.rm = TRUE) }) #/rowSums(x[,1:9],na.rm = T) Xci <- plyr::laply(FORECAST[t1:t], function(x) { - quantile(x[, i], c(0.025, 0.975),na.rm = T) }) + stats::quantile(x[, i], c(0.025, 0.975),na.rm = T) }) Xci[is.na(Xci)]<-0 @@ -1221,41 +1235,41 @@ for(t in seq_len(nt)) { # # observation / data if (i<10) { # - ciEnvelope(as.Date(obs.times[t1:t]), + PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), as.numeric(Ybar[, i]) - as.numeric(YCI[, i]) * 1.96, as.numeric(Ybar[, i]) + as.numeric(YCI[, i]) * 1.96, col = alphagreen) - lines(as.Date(obs.times[t1:t]), + graphics::lines(as.Date(obs.times[t1:t]), as.numeric(Ybar[, i]), type = "l", col = "darkgreen", lwd = 2) } # forecast - ciEnvelope(as.Date(obs.times[t1:t]), Xci[, 1], Xci[, 2], col = alphablue) #col='lightblue') #alphablue - lines(as.Date(obs.times[t1:t]), Xbar, col = "darkblue", type = "l", lwd = 2) #"darkblue" + PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), Xci[, 1], Xci[, 2], col = alphablue) #col='lightblue') #alphablue + graphics::lines(as.Date(obs.times[t1:t]), Xbar, col = "darkblue", type = "l", lwd = 2) #"darkblue" # analysis - ciEnvelope(as.Date(obs.times[t1:t]), XaCI[, 1], XaCI[, 2], col = alphapink) #alphapink - lines(as.Date(obs.times[t1:t]), Xa, col = "black", lty = 2, lwd = 2) #"black" + PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), XaCI[, 1], XaCI[, 2], col = alphapink) #alphapink + 
graphics::lines(as.Date(obs.times[t1:t]), Xa, col = "black", lty = 2, lwd = 2) #"black" - legend('topright',c('Forecast','Data','Analysis'),col=c(alphablue,alphagreen,alphapink),lty=1,lwd=5) + graphics::legend('topright',c('Forecast','Data','Analysis'),col=c(alphablue,alphagreen,alphapink),lty=1,lwd=5) } - dev.off() + grDevices::dev.off() ###-------------------------------------------------------------------### ### bias diagnostics ### ###-------------------------------------------------------------------### - pdf(file.path(settings$outdir, "bias.diagnostic.pdf")) + grDevices::pdf(file.path(settings$outdir, "bias.diagnostic.pdf")) for (i in seq_along(obs.mean[[1]])) { Xbar <- plyr::laply(FORECAST[t1:t], function(x) { mean(x[, i], na.rm = TRUE) }) - Xci <- plyr::laply(FORECAST[t1:t], function(x) { quantile(x[, i], c(0.025, 0.975)) }) + Xci <- plyr::laply(FORECAST[t1:t], function(x) { stats::quantile(x[, i], c(0.025, 0.975)) }) Xa <- plyr::laply(ANALYSIS[t1:t], function(x) { mean(x[, i], na.rm = TRUE) }) - XaCI <- plyr::laply(ANALYSIS[t1:t], function(x) { quantile(x[, i], c(0.025, 0.975)) }) + XaCI <- plyr::laply(ANALYSIS[t1:t], function(x) { stats::quantile(x[, i], c(0.025, 0.975)) }) if(length(which(is.na(Ybar[,i])))>=length(t1:t)) next() - reg <- lm(Xbar[t1:t] - unlist(Ybar[, i]) ~ c(t1:t)) + reg <- stats::lm(Xbar[t1:t] - unlist(Ybar[, i]) ~ c(t1:t)) plot(t1:t, Xbar - unlist(Ybar[, i]), pch = 16, cex = 1, @@ -1263,18 +1277,18 @@ for(t in seq_len(nt)) { # xlab = "Time", ylab = "Error", main = paste(colnames(X)[i], " Error = Forecast - Data")) - ciEnvelope(rev(t1:t), + PEcAn.visualization::ciEnvelope(rev(t1:t), rev(Xci[, 1] - unlist(Ybar[, i])), rev(Xci[, 2] - unlist(Ybar[, i])), col = alphabrown) - abline(h = 0, lty = 2, lwd = 2) - abline(reg) - mtext(paste("slope =", signif(summary(reg)$coefficients[2], digits = 3), + graphics::abline(h = 0, lty = 2, lwd = 2) + graphics::abline(reg) + graphics::mtext(paste("slope =", signif(summary(reg)$coefficients[2], digits = 3), "intercept =", signif(summary(reg)$coefficients[1], digits = 3))) # d<-density(c(Xbar[t1:t] - unlist(Ybar[t1:t,i]))) lines(d$y+1,d$x) # forecast minus analysis = update - reg1 <- lm(Xbar - Xa ~ c(t1:t)) + reg1 <- stats::lm(Xbar - Xa ~ c(t1:t)) plot(t1:t, Xbar - Xa, pch = 16, cex = 1, @@ -1282,13 +1296,13 @@ for(t in seq_len(nt)) { # xlab = "Time", ylab = "Update", main = paste(colnames(X)[i], "Update = Forecast - Analysis")) - ciEnvelope(rev(t1:t), + PEcAn.visualization::ciEnvelope(rev(t1:t), rev(Xbar - XaCI[, 1]), rev(Xbar - XaCI[, 2]), col = alphapurple) - abline(h = 0, lty = 2, lwd = 2) - abline(reg1) - mtext(paste("slope =", signif(summary(reg1)$coefficients[2], digits = 3), + graphics::abline(h = 0, lty = 2, lwd = 2) + graphics::abline(reg1) + graphics::mtext(paste("slope =", signif(summary(reg1)$coefficients[2], digits = 3), "intercept =", signif(summary(reg1)$coefficients[1], digits = 3))) # d<-density(c(Xbar[t1:t] - Xa[t1:t])) lines(d$y+1,d$x) @@ -1315,27 +1329,27 @@ for(t in seq_len(nt)) { # } } - dev.off() + grDevices::dev.off() ###-------------------------------------------------------------------### ### process variance plots ### ###-------------------------------------------------------------------### if (processvar) { - pdf('process.var.plots.pdf') + grDevices::pdf('process.var.plots.pdf') - cor.mat <- cov2cor(solve(enkf.params[[t]]$q.bar)) + cor.mat <- stats::cov2cor(solve(enkf.params[[t]]$q.bar)) colnames(cor.mat) <- colnames(X) rownames(cor.mat) <- colnames(X) - par(mfrow = c(1, 1), mai = c(1, 1, 4, 1)) + 
graphics::par(mfrow = c(1, 1), mai = c(1, 1, 4, 1)) corrplot::corrplot(cor.mat, type = "upper", tl.srt = 45,order='FPC') - par(mfrow=c(1,1)) + graphics::par(mfrow=c(1,1)) plot(as.Date(obs.times[t1:t]), unlist(lapply(enkf.params,'[[','n')), pch = 16, cex = 1, ylab = "Degrees of Freedom", xlab = "Time") - dev.off() + grDevices::dev.off() } @@ -1367,7 +1381,7 @@ for(t in seq_len(nt)) { # } - pdf(file.path(settings$outdir, "sda.enkf.time-series.pdf")) + grDevices::pdf(file.path(settings$outdir, "sda.enkf.time-series.pdf")) names.y <- unique(unlist(lapply(obs.mean[t1:t], function(x) { names(x) }))) Ybar <- t(sapply(obs.mean[t1:t], function(x) { @@ -1377,7 +1391,7 @@ for(t in seq_len(nt)) { # tmp[mch] <- x[mch] tmp })) - Y.order <- na.omit(pmatch(colnames(FORECAST[[t]]), colnames(Ybar))) + Y.order <- stats::na.omit(pmatch(colnames(FORECAST[[t]]), colnames(Ybar))) Ybar <- Ybar[,Y.order] YCI <- t(as.matrix(sapply(obs.cov[t1:t], function(x) { if (is.null(x)) { @@ -1397,7 +1411,7 @@ for(t in seq_len(nt)) { # Xbar <- plyr::laply(FORECAST[t1:t], function(x) { mean(x[, i], na.rm = TRUE) }) #/rowSums(x[,1:9],na.rm = T) Xci <- plyr::laply(FORECAST[t1:t], function(x) { - quantile(x[, i], c(0.025, 0.975),na.rm = T) }) + stats::quantile(x[, i], c(0.025, 0.975),na.rm = T) }) Xci[is.na(Xci)]<-0 @@ -1408,7 +1422,7 @@ for(t in seq_len(nt)) { # mean(x[, i],na.rm = T) }) XaCI <- plyr::laply(ANALYSIS[t1:t], function(x) { - quantile(x[, i], c(0.025, 0.975),na.rm = T )}) + stats::quantile(x[, i], c(0.025, 0.975),na.rm = T )}) Xa <- Xa XaCI <- XaCI @@ -1423,41 +1437,41 @@ for(t in seq_len(nt)) { # # observation / data if (i=length(t1:t)) next() - reg <- lm(Xbar[t1:t] - unlist(Ybar[, i]) ~ c(t1:t)) + reg <- stats::lm(Xbar[t1:t] - unlist(Ybar[, i]) ~ c(t1:t)) plot(t1:t, Xbar - unlist(Ybar[, i]), pch = 16, cex = 1, @@ -1465,18 +1479,18 @@ for(t in seq_len(nt)) { # xlab = "Time", ylab = "Error", main = paste(colnames(X)[i], " Error = Forecast - Data")) - ciEnvelope(rev(t1:t), + PEcAn.visualization::ciEnvelope(rev(t1:t), rev(Xci[, 1] - unlist(Ybar[, i])), rev(Xci[, 2] - unlist(Ybar[, i])), col = alphabrown) - abline(h = 0, lty = 2, lwd = 2) - abline(reg) - mtext(paste("slope =", signif(summary(reg)$coefficients[2], digits = 3), + graphics::abline(h = 0, lty = 2, lwd = 2) + graphics::abline(reg) + graphics::mtext(paste("slope =", signif(summary(reg)$coefficients[2], digits = 3), "intercept =", signif(summary(reg)$coefficients[1], digits = 3))) # d<-density(c(Xbar[t1:t] - unlist(Ybar[t1:t,i]))) lines(d$y+1,d$x) # forecast minus analysis = update - reg1 <- lm(Xbar - Xa ~ c(t1:t)) + reg1 <- stats::lm(Xbar - Xa ~ c(t1:t)) plot(t1:t, Xbar - Xa, pch = 16, cex = 1, @@ -1484,38 +1498,38 @@ for(t in seq_len(nt)) { # xlab = "Time", ylab = "Update", main = paste(colnames(X)[i], "Update = Forecast - Analysis")) - ciEnvelope(rev(t1:t), + PEcAn.visualization::ciEnvelope(rev(t1:t), rev(Xbar - XaCI[, 1]), rev(Xbar - XaCI[, 2]), col = alphapurple) - abline(h = 0, lty = 2, lwd = 2) - abline(reg1) - mtext(paste("slope =", signif(summary(reg1)$coefficients[2], digits = 3), + graphics::abline(h = 0, lty = 2, lwd = 2) + graphics::abline(reg1) + graphics::mtext(paste("slope =", signif(summary(reg1)$coefficients[2], digits = 3), "intercept =", signif(summary(reg1)$coefficients[1], digits = 3))) # d<-density(c(Xbar[t1:t] - Xa[t1:t])) lines(d$y+1,d$x) } - dev.off() + grDevices::dev.off() ###-------------------------------------------------------------------### ### process variance plots ### 
###-------------------------------------------------------------------### if (processvar) { - pdf('process.var.plots.pdf') + grDevices::pdf('process.var.plots.pdf') - cor.mat <- cov2cor(aqq[t,,] / bqq[t]) + cor.mat <- stats::cov2cor(aqq[t,,] / bqq[t]) colnames(cor.mat) <- colnames(X) rownames(cor.mat) <- colnames(X) - par(mfrow = c(1, 1), mai = c(1, 1, 4, 1)) + graphics::par(mfrow = c(1, 1), mai = c(1, 1, 4, 1)) corrplot::corrplot(cor.mat, type = "upper", tl.srt = 45,order='FPC') - par(mfrow=c(1,1)) + graphics::par(mfrow=c(1,1)) plot(as.Date(obs.times[t1:t]), bqq[t1:t], pch = 16, cex = 1, ylab = "Degrees of Freedom", xlab = "Time") - dev.off() + grDevices::dev.off() } diff --git a/modules/assim.sequential/R/sda.enkf_MultiSite.R b/modules/assim.sequential/R/sda.enkf_MultiSite.R index d5975c38c7f..81b77c6ba33 100644 --- a/modules/assim.sequential/R/sda.enkf_MultiSite.R +++ b/modules/assim.sequential/R/sda.enkf_MultiSite.R @@ -1,24 +1,37 @@ -#' @title sda.enkf.multisite -#' @name sda.enkf.multisite +#' State Variable Data Assimilation: Ensemble Kalman Filter and Generalized ensemble filter +#' +#' Check out SDA_control function for more details on the control arguments. +#' +#' Restart mode: Basic idea is that during a restart (primary case envisioned +#' as an iterative forecast), a new workflow folder is created and the previous +#' forecast for the start_time is copied over. During restart the initial run +#' before the loop is skipped, with the info being populated from the previous +#' run. The function then dives right into the first Analysis, then continues +#' on like normal. +#' #' @author Michael Dietze, Ann Raiho and Alexis Helgeson \email{dietze@@bu.edu} #' #' @param settings PEcAn settings object -#' @param obs.mean List of dataframe of observation means, named with observation datetime. -#' @param obs.cov List of covariance matrices of state variables , named with observation datetime. +#' @param obs.mean Lists of date times named by time points, which contains lists of sites named by site ids, which contains observation means for each state variables of each site for each time point. +#' @param obs.cov Lists of date times named by time points, which contains lists of sites named by site ids, which contains observation covariances for all state variables of each site for each time point. #' @param Q Process covariance matrix given if there is no data to estimate it. -#' @param restart Used for iterative updating previous forecasts. Default NULL. List object includes file path to previous runs and start date for SDA -#' @param forceRun Used to force job.sh files that were not run for ensembles in SDA (quick fix) -#' @param keepNC Used for debugging issues. .nc files are usually removed after each year in the out folder. This flag will keep the .nc + .nc.var files for futher investigations. -#' @param pre_enkf_params Used for carrying out SDA with pre-existed enkf.params, in which the Pf, aqq, and bqq can be used for the analysis step. -#' @param run_parallel If allows to proceed under parallel mode, default is TRUE. +#' @param restart Used for iterative updating previous forecasts. Default NULL. List object includes file path to previous runs and start date for SDA. +#' @param pre_enkf_params Used for passing pre-existing time-series of process error into the current SDA runs to ignore the impact by the differences between process errors. #' @param ensemble.samples Pass ensemble.samples from outside to avoid GitHub check issues. 
-#' @param control List of flags controlling the behaviour of the SDA. trace for reporting back the SDA outcomes, interactivePlot for plotting the outcomes after each step, -#' TimeseriesPlot for post analysis examination, BiasPlot for plotting the correlation between state variables, plot.title is the title of post analysis plots and debug mode allows for pausing the code and examining the variables inside the function. -#' -#’ @details -#’ Restart mode: Basic idea is that during a restart (primary case envisioned as an iterative forecast), a new workflow folder is created and the previous forecast for the start_time is copied over. During restart the initial run before the loop is skipped, with the info being populated from the previous run. The function then dives right into the first Analysis, then continues on like normal. -#' -#' @description State Variable Data Assimilation: Ensemble Kalman Filter and Generalized ensemble filter. Check out SDA_control function for more details on the control arguments. +#' @param control List of flags controlling the behavior of the SDA. +#' `trace` for reporting back the SDA outcomes; +#' `TimeseriesPlot` for post analysis examination; +#' `debug` decide if we want to pause the code and examining the variables inside the function; +#' `pause` decide if we want to pause the SDA workflow at current time point t; +#' `Profiling` decide if we want to export the temporal SDA outputs in CSV file; +#' `OutlierDetection` decide if we want to execute the outlier detection each time after the model forecasting; +#' `parallel_qsub` decide if we want to execute the `qsub` job submission under parallel mode; +#' `send_email` contains lists for sending email to report the SDA progress; +#' `keepNC` decide if we want to keep the NetCDF files inside the out directory; +#' `forceRun` decide if we want to proceed the Bayesian MCMC sampling without observations; +#' `run_parallel` decide if we want to run the SDA under parallel mode for the `future_map` function; +#' `MCMC.args` include lists for controling the MCMC sampling process (iteration, nchains, burnin, and nthin.). +#' @param ... Additional arguments, currently ignored #' #' @return NONE #' @import nimble furrr @@ -29,22 +42,20 @@ sda.enkf.multisite <- function(settings, obs.cov, Q = NULL, restart = NULL, - forceRun = TRUE, - keepNC = TRUE, pre_enkf_params = NULL, - run_parallel = TRUE, ensemble.samples = NULL, control=list(trace = TRUE, - FF = FALSE, - interactivePlot = FALSE, TimeseriesPlot = FALSE, - BiasPlot = FALSE, - plot.title = NULL, - facet.plots = FALSE, debug = FALSE, pause = FALSE, Profiling = FALSE, - OutlierDetection=FALSE), + OutlierDetection=FALSE, + parallel_qsub = TRUE, + send_email = NULL, + keepNC = TRUE, + forceRun = TRUE, + run_parallel = TRUE, + MCMC.args = NULL), ...) 
{ #add if/else for when restart points to folder instead if T/F set restart as T if(is.list(restart)){ @@ -54,7 +65,7 @@ sda.enkf.multisite <- function(settings, }else{ restart_flag = FALSE } - if(run_parallel){ + if(control$run_parallel){ if (future::supportsMulticore()) { future::plan(future::multicore) } else { @@ -62,7 +73,7 @@ sda.enkf.multisite <- function(settings, } } if (control$debug) browser() - tictoc::tic("Prepration") + tictoc::tic("Preparation") ###-------------------------------------------------------------------### ### read settings ### ###-------------------------------------------------------------------### @@ -125,9 +136,9 @@ sda.enkf.multisite <- function(settings, #Here I'm trying to make a temp config list name and put it into map to iterate if(multi.site.flag){ conf.settings<-settings - site.ids <- conf.settings %>% purrr::map(~.x[['run']] ) %>% purrr::map('site') %>% purrr::map('id') %>% unlist() %>% as.character() + site.ids <- conf.settings$run %>% purrr::map('site') %>% purrr::map('id') %>% base::unlist() %>% base::as.character() # a matrix ready to be sent to spDistsN1 in sp package - first col is the long second is the lat and row names are the site ids - site.locs <- conf.settings %>% purrr::map(~.x[['run']] ) %>% purrr::map('site') %>% purrr::map_dfr(~c(.x[['lon']],.x[['lat']]) %>% as.numeric)%>% + site.locs <- conf.settings$run %>% purrr::map('site') %>% purrr::map_dfr(~c(.x[['lon']],.x[['lat']]) %>% as.numeric)%>% t %>% `colnames<-`(c("Lon","Lat")) %>% `rownames<-`(site.ids) @@ -208,7 +219,7 @@ sda.enkf.multisite <- function(settings, dir.create(paste0(settings$outdir, "/Extracted_met/")) } - conf.settings<-conf.settings %>% + conf.settings <-conf.settings %>% `class<-`(c("list")) %>% #until here, it separates all the settings for all sites that listed in the xml file furrr::future_map(function(settings) { library(paste0("PEcAn.",settings$model$type), character.only = TRUE)#solved by including the model in the settings @@ -263,7 +274,7 @@ sda.enkf.multisite <- function(settings, }else{ PEcAn.logger::logger.info("The SDA output from the older simulation doesn't exist, assuming first SDA run with unconstrainded forecast output") #loading param info from previous forecast - if(is.null(ensemble.samples)){ + if(!exists("ensemble.samples") || is.null(ensemble.samples)){ load(file.path(old.dir, "samples.Rdata")) } #assuming that will only use previous unconstrained forecast runs for first run with SDA which means we are at t=1 @@ -276,6 +287,7 @@ sda.enkf.multisite <- function(settings, #build X using previous forecast output #out.configs object required to build X and restart.list object required for build X + #TODO: there should be an easier way to do this than to rerun write.ensemble.configs restart.list <- vector("list", length(conf.settings)) out.configs <- conf.settings %>% `class<-`(c("list")) %>% @@ -299,12 +311,12 @@ sda.enkf.multisite <- function(settings, new.params = new.params, nens = nens, read_restart_times = read_restart_times, - outdir = paste0(old.dir, "out/"), + outdir = file.path(old.dir, "out/"), t = 1, var.names = var.names, my.read_restart = my.read_restart, restart_flag = restart_flag) - + #let's read the parameters of each site/ens params.list <- reads %>% purrr::map(~.x %>% purrr::map("params")) # Now let's read the state variables of site/ens @@ -344,14 +356,16 @@ sda.enkf.multisite <- function(settings, new.params <- sda_matchparam(settings, ensemble.samples, site.ids, nens) } #sample met ensemble members - inputs <- conf.settings %>% 
purrr::map(function(setting) { + #TODO: incorporate Phyllis's restart work + # sample all inputs specified in the settings$ensemble not just met + inputs <- PEcAn.settings::papply(conf.settings,function(setting) { PEcAn.uncertainty::input.ens.gen( settings = setting, input = "met", method = setting$ensemble$samplingspace$met$method, parent_ids = NULL ) - }) + }) ###------------------------------------------------------------------------------------------------### ### loop over time ### ###------------------------------------------------------------------------------------------------### @@ -365,9 +379,9 @@ sda.enkf.multisite <- function(settings, if (t>1){ #for next time step split the met if model requires #-Splitting the input for the models that they don't care about the start and end time of simulations and they run as long as their met file. - inputs.split <- PEcAnAssimSequential::metSplit(conf.settings, inputs, settings, model, no_split = FALSE, obs.times, t, nens, restart_flag = FALSE, my.split_inputs) + inputs.split <- metSplit(conf.settings, inputs, settings, model, no_split = FALSE, obs.times, t, nens, restart_flag = FALSE, my.split_inputs) - #---------------- setting up the restart argument for each site separatly and keeping them in a list + #---------------- setting up the restart argument for each site separately and keeping them in a list restart.list <- furrr::future_pmap(list(out.configs, conf.settings %>% `class<-`(c("list")), params.list, inputs.split), function(configs, settings, new.params, inputs) { @@ -389,7 +403,7 @@ sda.enkf.multisite <- function(settings, ensemble.id = settings$ensemble$ensemble.id ) }) - } else { + } else { ## t == 1 restart.list <- vector("list", length(conf.settings)) } #add flag for restart t=1 to skip model runs @@ -416,25 +430,52 @@ sda.enkf.multisite <- function(settings, }) %>% stats::setNames(site.ids) - #I'm rewrting the runs because when I use the parallel appraoch for wrting configs the run.txt will get messed up; because multiple cores want to write on it at the same time. + #if it's a rabbitmq job sumbmission, we will first copy and paste the whole run folder within the SDA to the remote host. + if (!is.null(settings$host$rabbitmq)) { + settings$host$rabbitmq$prefix <- paste0(obs.year, ".nc") + cp2cmd <- gsub("@RUNDIR@", settings$host$rundir, settings$host$rabbitmq$cp2cmd) + try(system(cp2cmd, intern = TRUE)) + } + + #I'm rewriting the runs because when I use the parallel approach for writing configs the run.txt will get messed up; because multiple cores want to write on it at the same time. 
runs.tmp <- list.dirs(rundir, full.names = F) runs.tmp <- runs.tmp[grepl("ENS-*|[0-9]", runs.tmp)] writeLines(runs.tmp[runs.tmp != ''], file.path(rundir, 'runs.txt')) - PEcAn.workflow::start_model_runs(settings, write=settings$database$bety$write) - + paste(file.path(rundir, 'runs.txt')) ## testing + Sys.sleep(0.01) ## testing + if(control$parallel_qsub){ + if (is.null(control$jobs.per.file)) { + PEcAn.remote::qsub_parallel(settings, prefix = paste0(obs.year, ".nc")) + } else { + PEcAn.remote::qsub_parallel(settings, files=PEcAn.remote::merge_job_files(settings, control$jobs.per.file), prefix = paste0(obs.year, ".nc")) + } + }else{ + PEcAn.workflow::start_model_runs(settings, write=settings$database$bety$write) + } #------------- Reading - every iteration and for SDA - #put building of X into a function that gets called - reads <- build_X(out.configs = out.configs, - settings = settings, - new.params = new.params, - nens = nens, - read_restart_times = read_restart_times, - outdir = outdir, - t = t, - var.names = var.names, - my.read_restart = my.read_restart, - restart_flag = restart_flag) + max_t <- 0 + while("try-error" %in% class( + try(reads <- build_X(out.configs = out.configs, + settings = settings, + new.params = new.params, + nens = nens, + read_restart_times = read_restart_times, + outdir = outdir, + t = t, + var.names = var.names, + my.read_restart = my.read_restart, + restart_flag = restart_flag), silent = T)) + ){ + Sys.sleep(10) + max_t <- max_t + 1 + if(max_t > 3){ + PEcAn.logger::logger.info("Can't find outputed NC file! Please rerun the code!") + break + return(0) + } + PEcAn.logger::logger.info("Empty folder, try again!") + } if (control$debug) browser() #let's read the parameters of each site/ens @@ -455,12 +496,13 @@ sda.enkf.multisite <- function(settings, # GWBI AbvGrndWood GWBI AbvGrndWood #[1,] 3.872521 37.2581 3.872521 37.2581 # But therer is an attribute called `Site` which tells yout what column is for what site id - check out attr (X,"Site") - if (multi.site.flag) + if (multi.site.flag){ X <- X %>% purrr::map_dfc(~.x) %>% as.matrix() %>% `colnames<-`(c(rep(var.names, length(X)))) %>% `attr<-`('Site',c(rep(site.ids, each=length(var.names)))) + } } ## end else from restart & t==1 FORECAST[[obs.t]] <- X @@ -468,126 +510,143 @@ sda.enkf.multisite <- function(settings, ###-------------------------------------------------------------------### ### preparing OBS ### ###-------------------------------------------------------------------###---- - if (!is.null(obs.mean[[t]][[1]])) { - if (control$debug) browser() - #Making R and Y - Obs.cons <- Construct.R(site.ids, var.names, obs.mean[[t]], obs.cov[[t]]) + #To trigger the analysis function with free run, you need to first specify the control$forceRun as TRUE, + #Then specify the settings$state.data.assimilation$scalef as 0, and settings$state.data.assimilation$free.run as TRUE. 
+ if (!is.null(obs.mean[[t]][[1]]) | (as.logical(settings$state.data.assimilation$free.run) & control$forceRun)) { + # TODO: as currently configured, Analysis runs even if all obs are NA, + # which clearly should be triggering the `else` of this if, but the + # `else` has not been invoked in a while an may need updating - Y <- Obs.cons$Y - R <- Obs.cons$R - if (length(Y) > 1) { - PEcAn.logger::logger.info("The zero variances in R and Pf is being replaced by half and one fifth of the minimum variance in those matrices respectively.") - diag(R)[which(diag(R)==0)] <- min(diag(R)[which(diag(R) != 0)])/2 + #decide if we want to estimate the process variance and choose the according function. + if(processvar == FALSE) { + an.method<-EnKF + } else if (processvar == TRUE && settings$state.data.assimilation$q.type %in% c("SINGLE", "SITE")) { + an.method<-GEF.MultiSite } - # making the mapping operator - H <- Construct.H.multisite(site.ids, var.names, obs.mean[[t]]) - #Pass aqq and bqq. - aqq <- NULL - bqq <- numeric(nt + 1) - #if t>1 - if(is.null(pre_enkf_params) && t>1){ - aqq <- enkf.params[[t-1]]$aqq - bqq <- enkf.params[[t-1]]$bqq - X.new<-enkf.params[[t-1]]$X.new - } - if(!is.null(pre_enkf_params) && t>1){ - aqq <- pre_enkf_params[[t-1]]$aqq - bqq <- pre_enkf_params[[t-1]]$bqq - X.new<-pre_enkf_params[[t-1]]$X.new - } - if(!is.null(pre_enkf_params)){ - Pf <- pre_enkf_params[[t]]$Pf - } - - if(!exists('Cmcmc_tobit2space')) { - recompileTobit = TRUE - }else{ - recompileTobit = FALSE - } - - if(!exists('Cmcmc')) { - recompileGEF = TRUE - }else{ - recompileGEF = FALSE - } - #weight list - # This reads ensemble weights generated by `get_ensemble_weights` function from assim.sequential package - weight_list <- list() - if(!file.exists(file.path(settings$outdir, "ensemble_weights.Rdata"))){ - PEcAn.logger::logger.warn("ensemble_weights.Rdata cannot be found. Make sure you generate samples by running the get.ensemble.weights function before running SDA if you want the ensembles to be weighted.") - #create null list - for(tt in 1:length(obs.times)){ - weight_list[[tt]] <- rep(1,nens) #no weights + #decide if we want the block analysis function or multi-site analysis function. + if (processvar == TRUE && settings$state.data.assimilation$q.type %in% c("vector", "wishart")) { + #initialize block.list.all. 
+ if (t == 1 | !exists("block.list.all")) { + block.list.all <- obs.mean %>% purrr::map(function(l){NULL}) } - } else{ - load(file.path(settings$outdir, "ensemble_weights.Rdata")) ## loads ensemble.samples - } - wts <- unlist(weight_list[[t]]) - ###-------------------------------------------------------------------### - ### Analysis ### - ###-------------------------------------------------------------------###---- - if(processvar == FALSE){an.method<-EnKF}else{an.method<-GEF.MultiSite} - - #-analysis function - if(t>1){ - pre_elements <- enkf.params[[t-1]]$elements.W.Data - }else{ - pre_elements <- NULL - } - enkf.params[[obs.t]] <- GEF.MultiSite( - settings, - FUN = an.method, - Forecast = list(Q = Q, X = X), - Observed = list(R = R, Y = Y), - H = H, - extraArg = list( - aqq = aqq, - bqq = bqq, - Pf = Pf, - t = t, - nitr.GEF = nitr.GEF, - nthin = nthin, - nburnin = nburnin, - censored.data = censored.data, - recompileGEF = recompileGEF, - recompileTobit = recompileTobit, - wts = wts, - pre_elements = pre_elements - ), - choose = choose, - nt = nt, - obs.mean = obs.mean, - nitr = 100000, - nburnin = 10000, - obs.cov = obs.cov, - site.ids = site.ids, - blocked.dis = blocked.dis, - distances = distances - ) - tictoc::tic(paste0("Preparing for Adjustment for cycle = ", t)) - #Forecast - mu.f <- enkf.params[[obs.t]]$mu.f - Pf <- enkf.params[[obs.t]]$Pf - #Analysis - Pa <- enkf.params[[obs.t]]$Pa - mu.a <- enkf.params[[obs.t]]$mu.a - #extracting extra outputs - if (control$debug) browser() - if (processvar) { - aqq <- enkf.params[[obs.t]]$aqq - bqq <- enkf.params[[obs.t]]$bqq - } - # Adding obs elements to the enkf.params - #This can later on help with diagnostics - enkf.params[[obs.t]] <- - c( - enkf.params[[obs.t]], - R = list(R), - Y = list(Y), - RestartList = list(restart.list %>% stats::setNames(site.ids)) + #initialize MCMC arguments. + if (is.null(control$MCMC.args)) { + MCMC.args <- list(niter = 1e5, + nthin = 10, + nchain = 3, + nburnin = 5e4) + } else { + MCMC.args <- control$MCMC.args + } + #running analysis function. + enkf.params[[obs.t]] <- analysis_sda_block(settings, block.list.all, X, obs.mean, obs.cov, t, nt, MCMC.args, pre_enkf_params) + enkf.params[[obs.t]] <- c(enkf.params[[obs.t]], RestartList = list(restart.list %>% stats::setNames(site.ids))) + block.list.all <- enkf.params[[obs.t]]$block.list.all + #Forecast + mu.f <- enkf.params[[obs.t]]$mu.f + Pf <- enkf.params[[obs.t]]$Pf + #Analysis + Pa <- enkf.params[[obs.t]]$Pa + mu.a <- enkf.params[[obs.t]]$mu.a + } else if (exists("an.method")) { + #Making R and Y + Obs.cons <- Construct.R(site.ids, var.names, obs.mean[[t]], obs.cov[[t]]) + Y <- Obs.cons$Y + R <- Obs.cons$R + if (length(Y) > 1) { + PEcAn.logger::logger.info("The zero variances in R and Pf is being replaced by half and one fifth of the minimum variance in those matrices respectively.") + diag(R)[which(diag(R)==0)] <- min(diag(R)[which(diag(R) != 0)])/2 + } + # making the mapping operator + H <- Construct.H.multisite(site.ids, var.names, obs.mean[[t]]) + #Pass aqq and bqq. 
+ aqq <- NULL + bqq <- numeric(nt + 1) + Pf <- NULL + #if t>1 + if(is.null(pre_enkf_params) && t>1){ + aqq <- enkf.params[[t-1]]$aqq + bqq <- enkf.params[[t-1]]$bqq + X.new<-enkf.params[[t-1]]$X.new + } + if(!is.null(pre_enkf_params) && t>1){ + aqq <- pre_enkf_params[[t-1]]$aqq + bqq <- pre_enkf_params[[t-1]]$bqq + X.new<-pre_enkf_params[[t-1]]$X.new + } + if(!is.null(pre_enkf_params)){ + Pf <- pre_enkf_params[[t]]$Pf + } + recompileTobit = !exists('Cmcmc_tobit2space') + recompileGEF = !exists('Cmcmc') + #weight list + # This reads ensemble weights generated by `get_ensemble_weights` function from assim.sequential package + weight_list <- list() + if(!file.exists(file.path(settings$outdir, "ensemble_weights.Rdata"))){ + PEcAn.logger::logger.warn("ensemble_weights.Rdata cannot be found. Make sure you generate samples by running the get.ensemble.weights function before running SDA if you want the ensembles to be weighted.") + #create null list + for(tt in 1:length(obs.times)){ + weight_list[[tt]] <- rep(1,nens) #no weights + } + } else{ + load(file.path(settings$outdir, "ensemble_weights.Rdata")) ## loads ensemble.samples + } + wts <- unlist(weight_list[[t]]) + #-analysis function + enkf.params[[obs.t]] <- GEF.MultiSite( + settings, + FUN = an.method, + Forecast = list(Q = Q, X = X), + Observed = list(R = R, Y = Y), + H = H, + extraArg = list( + aqq = aqq, + bqq = bqq, + Pf = Pf, + t = t, + nitr.GEF = nitr.GEF, + nthin = nthin, + nburnin = nburnin, + censored.data = censored.data, + recompileGEF = recompileGEF, + recompileTobit = recompileTobit, + wts = wts + ), + choose = choose, + nt = nt, + obs.mean = obs.mean, + nitr = 100000, + nburnin = 10000, + obs.cov = obs.cov, + site.ids = site.ids, + blocked.dis = blocked.dis, + distances = distances ) + tictoc::tic(paste0("Preparing for Adjustment for cycle = ", t)) + #Forecast + mu.f <- enkf.params[[obs.t]]$mu.f + Pf <- enkf.params[[obs.t]]$Pf + #Analysis + Pa <- enkf.params[[obs.t]]$Pa + mu.a <- enkf.params[[obs.t]]$mu.a + #extracting extra outputs + if (control$debug) browser() + if (processvar) { + aqq <- enkf.params[[obs.t]]$aqq + bqq <- enkf.params[[obs.t]]$bqq + } + # Adding obs elements to the enkf.params + #This can later on help with diagnostics + enkf.params[[obs.t]] <- + c( + enkf.params[[obs.t]], + R = list(R), + Y = list(Y), + RestartList = list(restart.list %>% stats::setNames(site.ids)) + ) + } ###-------------------------------------------------------------------### ### Trace ### @@ -596,11 +655,9 @@ sda.enkf.multisite <- function(settings, if(control$trace) { PEcAn.logger::logger.warn ("\n --------------------------- ",obs.year," ---------------------------\n") PEcAn.logger::logger.warn ("\n --------------Obs mean----------- \n") - print(Y) + print(enkf.params[[obs.t]]$Y) PEcAn.logger::logger.warn ("\n --------------Obs Cov ----------- \n") - print(R) - PEcAn.logger::logger.warn ("\n --------------Obs H ----------- \n") - print(H) + print(enkf.params[[obs.t]]$R) PEcAn.logger::logger.warn ("\n --------------Forecast mean ----------- \n") print(enkf.params[[obs.t]]$mu.f) PEcAn.logger::logger.warn ("\n --------------Forecast Cov ----------- \n") @@ -633,18 +690,19 @@ sda.enkf.multisite <- function(settings, #will throw an error when q.bar and Pf are different sizes i.e. 
when you are running with no obs and do not variance for all state variables #Pa <- Pf + solve(q.bar) #hack have Pa = Pf for now - if(!is.null(pre_enkf_params)){ - Pf <- pre_enkf_params[[t]]$Pf - }else{ - Pf <- stats::cov(X) # Cov Forecast - This is used as an initial condition - } + # if(!is.null(pre_enkf_params)){ + # Pf <- pre_enkf_params[[t]]$Pf + # }else{ + # Pf <- stats::cov(X) # Cov Forecast - This is used as an initial condition + # } + Pf <- stats::cov(X) Pa <- Pf } enkf.params[[obs.t]] <- list(mu.f = mu.f, Pf = Pf, mu.a = mu.a, Pa = Pa) } ###-------------------------------------------------------------------### - ### adjustement/update state matrix ### + ### adjust/update state matrix ### ###-------------------------------------------------------------------###---- tictoc::tic(paste0("Adjustment for cycle = ", t)) if(adjustment == TRUE){ @@ -652,22 +710,18 @@ sda.enkf.multisite <- function(settings, } else { analysis <- as.data.frame(mvtnorm::rmvnorm(as.numeric(nrow(X)), mu.a, Pa, method = "svd")) } - analysis[analysis<0] <- 0 colnames(analysis) <- colnames(X) ##### Mapping analysis vectors to be in bounds of state variables - if(processvar==TRUE){ - for(i in 1:ncol(analysis)){ - int.save <- state.interval[which(startsWith(colnames(analysis)[i], - var.names)),] - analysis[analysis[,i] < int.save[1],i] <- int.save[1] - analysis[analysis[,i] > int.save[2],i] <- int.save[2] - } + for(i in 1:ncol(analysis)){ + int.save <- state.interval[which(startsWith(colnames(analysis)[i], + var.names)),] + analysis[analysis[,i] < int.save[1],i] <- int.save[1] + analysis[analysis[,i] > int.save[2],i] <- int.save[2] } ## in the future will have to be separated from analysis new.state <- as.data.frame(analysis) ANALYSIS[[obs.t]] <- analysis - ANALYSIS <-ANALYSIS ens_weights[[obs.t]] <- PEcAnAssimSequential::sda_weights_site(FORECAST, ANALYSIS, t, as.numeric(settings$ensemble$size)) ###-------------------------------------------------------------------### ### save outputs ### @@ -686,30 +740,32 @@ sda.enkf.multisite <- function(settings, tictoc::tic(paste0("Visulization for cycle = ", t)) #writing down the image - either you asked for it or nor :) - if ((t%%2 == 0 | t == nt) & (control$TimeseriesPlot) & !is.null(obs.mean[[t]][[1]])){ - post.analysis.multisite.ggplot(settings, - t, - obs.times, - obs.mean, - obs.cov, - FORECAST, - ANALYSIS , - plot.title=control$plot.title, - facetg=control$facet.plots, - readsFF=readsFF) - } + if ((t%%2 == 0 | t == nt) & (control$TimeseriesPlot)){ + if (as.logical(settings$state.data.assimilation$free.run)) { + SDA_timeseries_plot(ANALYSIS, FORECAST, obs.mean, obs.cov, settings$outdir, by = "var", types = c("FORECAST", "ANALYSIS")) + } else { + SDA_timeseries_plot(ANALYSIS, FORECAST, obs.mean, obs.cov, settings$outdir, by = "var", types = c("FORECAST", "ANALYSIS", "OBS")) + } + } #Saving the profiling result if (control$Profiling) alltocs(file.path(settings$outdir,"SDA", "Profiling.csv")) # remove files as SDA runs - if (!(keepNC)) - { - unlink(list.files(outdir, "*.nc", recursive = TRUE, full.names = TRUE)) - } - # useful for debugging to keep .nc files for assimilated years. 
      #Saving the profiling result
      if (control$Profiling) alltocs(file.path(settings$outdir,"SDA", "Profiling.csv"))
      # remove files as SDA runs
-      if (!(keepNC))
-      {
-        unlink(list.files(outdir, "*.nc", recursive = TRUE, full.names = TRUE))
-      }
-      # useful for debugging to keep .nc files for assimilated years. T = 2, because this loops removes the files that were run when starting the next loop
-      if (keepNC && t == 1)
-      {
+      if (!(control$keepNC) && t == 1){
         unlink(list.files(outdir, "*.nc", recursive = TRUE, full.names = TRUE))
       }
+      if(!is.null(control$send_email)){
+        sendmail <- Sys.which("sendmail")
+        mailfile <- tempfile("mail")
+        cat(paste0("From: ", control$send_email$from, "\n", "Subject: ", "SDA progress report", "\n", "To: ", control$send_email$to, "\n", "\n", paste("Time point:", obs.times[t], "has been completed!")), file = mailfile)
+        system2(sendmail, c("-f", paste0("\"", control$send_email$from, "\""), paste0("\"", control$send_email$to, "\""), "<", mailfile))
+        unlink(mailfile)
+      }
+      gc()
+      # useful for debugging to keep .nc files for assimilated years. T = 2, because this loop removes the files that were run when starting the next loop
+#      if (keepNC && t == 1){
+#        unlink(list.files(outdir, "*.nc", recursive = TRUE, full.names = TRUE))
+#      }
+      ## MCD: I commented the above "if" out because if you are restarting from a previous forecast, this might delete the files in that earlier folder
    } ### end loop over time
 } # sda.enkf
\ No newline at end of file
diff --git a/modules/assim.sequential/R/sda.enkf_refactored.R b/modules/assim.sequential/R/sda.enkf_refactored.R
index 2323de56092..f87b5eecc41 100644
--- a/modules/assim.sequential/R/sda.enkf_refactored.R
+++ b/modules/assim.sequential/R/sda.enkf_refactored.R
@@ -1,25 +1,37 @@
-#' @title sda.enkf
-#' @name  sda.enkf
+#' State Variable Data Assimilation: Ensemble Kalman Filter and Generalized ensemble filter
+#'
+#' Restart mode: Basic idea is that during a restart (primary case
+#' envisioned as an iterative forecast), a new workflow folder is created and
+#' the previous forecast for the start_time is copied over. During restart the
+#' initial run before the loop is skipped, with the info being populated from
+#' the previous run. The function then dives right into the first Analysis,
+#' then continues on like normal.
+#'
 #' @author Michael Dietze and Ann Raiho \email{dietze@@bu.edu}
 #'
 #' @param settings  PEcAn settings object
-#' @param obs.mean  List of dataframe of observation means, named with observation datetime.
-#' @param obs.cov   List of covariance matrices of state variables , named with observation datetime.
-#' @param Q         Process covariance matrix given if there is no data to estimate it.
-#' @param restart   Used for iterative updating previous forecasts. When the restart is TRUE it read the object in SDA folder written from previous SDA.
-#' @param control   List of flags controlling the behaviour of the SDA. trace for reporting back the SDA outcomes, interactivePlot for plotting the outcomes after each step,
-#'  TimeseriesPlot for post analysis examination, BiasPlot for plotting the correlation between state variables, plot.title is the title of post analysis plots and debug mode allows for pausing the code and examining the variables inside the function.
+#' @param obs.mean  List of dataframes of observation means, named with
+#'   observation datetime.
+#' @param obs.cov   List of covariance matrices of state variables, named with
+#'   observation datetime.
+#' @param Q         Process covariance matrix given if there is no data to
+#'   estimate it.
+#' @param restart   Used for iteratively updating previous forecasts. When
+#'   restart is TRUE it reads the object in the SDA folder written by the
+#'   previous SDA run.
+#' @param control   List of flags controlling the behaviour of the SDA:
+#'   trace for reporting back the SDA outcomes, interactivePlot for plotting
+#'   the outcomes after each step, TimeseriesPlot for post-analysis
+#'   examination, BiasPlot for plotting the correlation between state
+#'   variables, plot.title for the title of the post-analysis plots, and debug
+#'   mode for pausing the code and examining the variables inside the
+#'   function.
+#' @param ...       Additional arguments, currently ignored
 #'
-#’ @details
-#’ Restart mode: Basic idea is that during a restart (primary case envisioned as an iterative forecast), a new workflow folder is created and the previous forecast for the start_time is copied over. During restart the initial run before the loop is skipped, with the info being populated from the previous run. The function then dives right into the first Analysis, then continues on like normal.
-#'
-#' @description State Variable Data Assimilation: Ensemble Kalman Filter and Generalized ensemble filter
 #'
 #' @return NONE
 #' @import nimble
 #' @export
 #'
-
 sda.enkf <- function(settings,
                      obs.mean,
                      obs.cov,
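# A sketch of a control list matching the flags documented above (the names
# come straight from the @param control text; values are only illustrative):
control <- list(
  trace           = TRUE,   # report SDA outcomes as the loop runs
  interactivePlot = FALSE,  # plot the outcomes after each step
  TimeseriesPlot  = TRUE,   # post-analysis time-series examination
  BiasPlot        = FALSE,  # correlation between state variables
  plot.title      = NULL,   # title of the post-analysis plots
  debug           = FALSE   # pause inside the function to inspect variables
)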
@@ -321,7 +333,7 @@ sda.enkf <- function(settings,
   #--------------------------  Writing the config/Running the model and reading the outputs for each ensemble
-  outconfig <- write.ensemble.configs(defaults = config.settings$pfts,
+  outconfig <- PEcAn.uncertainty::write.ensemble.configs(defaults = config.settings$pfts,
                                       ensemble.samples = ensemble.samples,
                                       settings = config.settings,
                                       model = config.settings$model$type,
@@ -388,7 +400,7 @@ sda.enkf <- function(settings,
   if(sum(X,na.rm=T) == 0){
-    logger.severe(paste('NO FORECAST for',obs.times[t],'Check outdir logfiles or read restart. Do you have the right variable names?'))
+    PEcAn.logger::logger.severe(paste('NO FORECAST for',obs.times[t],'Check outdir logfiles or read restart. Do you have the right variable names?'))
   }
   ###-------------------------------------------------------------------###
@@ -519,7 +531,7 @@ sda.enkf <- function(settings,
   } else {
     mu.f <- as.numeric(apply(X, 2, mean, na.rm = TRUE))
-    Pf <- cov(X)
+    Pf <- stats::cov(X)
     ###-------------------------------------------------------------------###
     ### No Observations --                                           ###----
     ###-----------------------------------------------------------------###
@@ -547,7 +559,7 @@ sda.enkf <- function(settings,
     if (processvar & exists('X.new')) {X.adj.arg <- X.new }else{ X.adj.arg <- X ; print('using X not X.new. Assuming GEF was skipped this iteration?')}
     analysis <-adj.ens(Pf, X.adj.arg, mu.f, mu.a, Pa)
   }else{
-    analysis <- as.data.frame(rmvnorm(as.numeric(nrow(X)), mu.a, Pa, method = "svd"))
+    analysis <- as.data.frame(mvtnorm::rmvnorm(as.numeric(nrow(X)), mu.a, Pa, method = "svd"))
   }
   colnames(analysis) <- colnames(X)
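# Standalone sketch of the analysis draw above: when the ensemble adjustment
# is skipped, analysis members are sampled directly from the posterior mean
# and covariance (toy two-variable numbers; mvtnorm is a CRAN package):
mu.a <- c(10, 2)                  # hypothetical posterior mean
Pa <- diag(c(1, 0.1))             # hypothetical posterior covariance
analysis <- as.data.frame(mvtnorm::rmvnorm(100, mu.a, Pa, method = "svd"))
dim(analysis)                     # 100 ensemble members x 2 state variables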
diff --git a/modules/assim.sequential/R/sda_matchparam.R b/modules/assim.sequential/R/sda_matchparam.R
index 4c1383a9e69..13a0ea4d3b1 100644
--- a/modules/assim.sequential/R/sda_matchparam.R
+++ b/modules/assim.sequential/R/sda_matchparam.R
@@ -15,9 +15,13 @@ sda_matchparam <- function(settings, ensemble.samples, site.ids, nens){
   all.pft.names <- names(ensemble.samples)
   #loop over each site.
-  for (i in 1:length(site.ids)) {
+  for (i in seq_along(site.ids)) {
     #match pft name
     site.pft.name <- settings[[i]]$run$site$site.pft$pft.name
+    if(is.null(site.pft.name)){
+      site_pft <- utils::read.csv(settings[[i]]$run$inputs$pft.site$path)
+      site.pft.name <- site_pft$pft[site_pft$site == settings[[i]]$run$site$id]
+    }
     which.pft <- which(all.pft.names==site.pft.name)
     site.param <- list()
diff --git a/modules/assim.sequential/R/sda_plotting.R b/modules/assim.sequential/R/sda_plotting.R
index 39b606f69aa..a9a33b614f0 100755
--- a/modules/assim.sequential/R/sda_plotting.R
+++ b/modules/assim.sequential/R/sda_plotting.R
@@ -1,26 +1,30 @@
-#' @title generate_colors_sda
-#' @name  generate_colors_sda
-#' @author Ann Raiho
-#' @description This function generates a series of colors in its parents enviroment. This is mainly used in AssimSequential package.
-#' @export
+# @author Ann Raiho
+# @description This function generates a series of colors. It is mainly used in the AssimSequential package.
 generate_colors_sda <-function(){
-  pink <<- col2rgb("deeppink")
-  alphapink <<- rgb(pink[1], pink[2], pink[3], 180, max = 255)
-  green <<- col2rgb("green")
-  alphagreen <<- rgb(green[1], green[2], green[3], 75, max = 255)
-  blue <<- col2rgb("blue")
-  alphablue <<- rgb(blue[1], blue[2], blue[3], 75, max = 255)
-  purple <<- col2rgb("purple")
-  alphapurple <<- rgb(purple[1], purple[2], purple[3], 75, max = 255)
-  brown <<- col2rgb("brown")
-  alphabrown <<- rgb(brown[1], brown[2], brown[3], 30, max = 255)
+  pink <- col2rgb("deeppink")
+  alphapink <- rgb(pink[1], pink[2], pink[3], 180, max = 255)
+  green <- col2rgb("green")
+  alphagreen <- rgb(green[1], green[2], green[3], 75, max = 255)
+  blue <- col2rgb("blue")
+  alphablue <- rgb(blue[1], blue[2], blue[3], 75, max = 255)
+  purple <- col2rgb("purple")
+  alphapurple <- rgb(purple[1], purple[2], purple[3], 75, max = 255)
+  brown <- col2rgb("brown")
+  alphabrown <- rgb(brown[1], brown[2], brown[3], 30, max = 255)
+
+  return(list(
+    pink = alphapink,
+    green = alphagreen,
+    blue = alphablue,
+    purple = alphapurple,
+    brown = alphabrown))
 }
 
 ##' Internal functions for plotting SDA outputs. Interactive, post analysis time-series and bias plots in base plotting system and ggplot
 ##' @param settings  pecan standard settings list.
 ##' @param t current time - int number giving the position of the current time in obs.time.
-##' @param obs.time vector of dates of measurements
+##' @param obs.times vector of dates of measurements
 ##' @param obs.mean list of vectors of the means of observed data named by the measured date.
 ##' @param obs.cov list of cov matrices of the observed data named by the measured date.
##' @param obs list containing the mean and cov object @@ -39,9 +43,14 @@ interactive.plotting.sda<-function(settings, t, obs.times, obs.mean, obs.cov, ob "needed by `PEcAnAssimSequential::interactive.plotting.sda()`.", "Please install it and try again.") } - + if (!requireNamespace("PEcAn.visualization", quietly = TRUE)) { + PEcAn.logger::logger.error( + "Can't find package 'PEcAn.visualization',", + "needed by `PEcAnAssimSequential::interactive.plotting.sda()`.", + "Please install it and try again.") + } #Defining some colors - generate_colors_sda() + sda_colors <- generate_colors_sda() t1 <- 1 var.names <- var.names <- sapply(settings$state.data.assimilation$state.variable, '[[', "variable.name") names.y <- unique(unlist(lapply(obs.mean[t1:t], function(x) { names(x) }))) @@ -55,7 +64,7 @@ interactive.plotting.sda<-function(settings, t, obs.times, obs.mean, obs.cov, ob })) if(any(obs)){ - Y.order <- na.omit(pmatch(colnames(X), colnames(Ybar))) + Y.order <- stats::na.omit(pmatch(colnames(X), colnames(Ybar))) Ybar <- Ybar[,Y.order] Ybar[is.na(Ybar)] <- 0 YCI <- t(as.matrix(sapply(obs.cov[t1:t], function(x) { @@ -72,15 +81,15 @@ interactive.plotting.sda<-function(settings, t, obs.times, obs.mean, obs.cov, ob YCI <- matrix(NA,nrow=length(t1:t), ncol=max(length(names.y),1)) } - par(mfrow = c(2, 1)) + graphics::par(mfrow = c(2, 1)) colmax<-2 for (i in 1:ncol(FORECAST[[t]])) { # Xbar <- plyr::laply(FORECAST[t1:t], function(x) { mean(x[, i], na.rm = TRUE) }) - Xci <- plyr::laply(FORECAST[t1:t], function(x) { quantile(x[, i], c(0.025, 0.975), na.rm = TRUE) }) + Xci <- plyr::laply(FORECAST[t1:t], function(x) { stats::quantile(x[, i], c(0.025, 0.975), na.rm = TRUE) }) Xa <- plyr::laply(ANALYSIS[t1:t], function(x) { mean(x[, i], na.rm = TRUE) }) - XaCI <- plyr::laply(ANALYSIS[t1:t], function(x) { quantile(x[, i], c(0.025, 0.975), na.rm = TRUE) }) + XaCI <- plyr::laply(ANALYSIS[t1:t], function(x) { stats::quantile(x[, i], c(0.025, 0.975), na.rm = TRUE) }) ylab.names <- unlist(sapply(settings$state.data.assimilation$state.variable, function(x) { x })[2, ], use.names = FALSE) @@ -95,11 +104,11 @@ interactive.plotting.sda<-function(settings, t, obs.times, obs.mean, obs.cov, ob xlab = "Year", ylab = ylab.names[grep(colnames(X)[i], var.names)], main = colnames(X)[i]) - ciEnvelope(as.Date(obs.times[t1:t]), + PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), as.numeric(Ybar[, i]) - as.numeric(YCI[, i]) * 1.96, as.numeric(Ybar[, i]) + as.numeric(YCI[, i]) * 1.96, - col = alphagreen) - lines(as.Date(obs.times[t1:t]), + col = sda_colors$green) + graphics::lines(as.Date(obs.times[t1:t]), as.numeric(Ybar[, i]), type = "l", col = "darkgreen", @@ -115,13 +124,13 @@ interactive.plotting.sda<-function(settings, t, obs.times, obs.mean, obs.cov, ob } # forecast - ciEnvelope(as.Date(obs.times[t1:t]), Xci[, 1], Xci[, 2], col = alphablue) #col='lightblue') - lines(as.Date(obs.times[t1:t]), Xbar, col = "darkblue", type = "l", lwd = 2) + PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), Xci[, 1], Xci[, 2], col = sda_colors$blue) #col='lightblue') + graphics::lines(as.Date(obs.times[t1:t]), Xbar, col = "darkblue", type = "l", lwd = 2) # analysis - ciEnvelope(as.Date(obs.times[t1:t]), XaCI[, 1], XaCI[, 2], col = alphapink) - lines(as.Date(obs.times[t1:t]), Xa, col = "black", lty = 2, lwd = 2) - #legend('topright',c('Forecast','Data','Analysis'),col=c(alphablue,alphagreen,alphapink),lty=1,lwd=5) + PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), XaCI[, 1], XaCI[, 2], col = sda_colors$pink) + 
graphics::lines(as.Date(obs.times[t1:t]), Xa, col = "black", lty = 2, lwd = 2) + #legend('topright', c('Forecast','Data','Analysis'), col=c(sda_colors$blue, sda_colors$green, sda_colors$pink), lty=1, lwd=5) } } @@ -136,13 +145,19 @@ postana.timeser.plotting.sda<-function(settings, t, obs.times, obs.mean, obs.cov "needed by `PEcAnAssimSequential::postana.timeser.plotting.sda()`.", "Please install it and try again.") } + if (!requireNamespace("PEcAn.visualization", quietly = TRUE)) { + PEcAn.logger::logger.error( + "Can't find package 'PEcAn.visualization',", + "needed by `PEcAnAssimSequential::postana.timeser.plotting.sda()`.", + "Please install it and try again.") + } #Defining some colors - generate_colors_sda() + sda_colors <- generate_colors_sda() t1 <- 1 var.names <- sapply(settings$state.data.assimilation$state.variable, '[[', "variable.name") #---- - pdf(file.path(settings$outdir,"SDA", "sda.enkf.time-series.pdf")) + grDevices::pdf(file.path(settings$outdir,"SDA", "sda.enkf.time-series.pdf")) names.y <- unique(unlist(lapply(obs.mean[t1:t], function(x) { names(x) }))) Ybar <- t(sapply(obs.mean[t1:t], function(x) { tmp <- rep(NA, length(names.y)) @@ -177,7 +192,7 @@ postana.timeser.plotting.sda<-function(settings, t, obs.times, obs.mean, obs.cov Xbar <- plyr::laply(FORECAST[t1:t], function(x) { mean(x[, i], na.rm = TRUE) }) #/rowSums(x[,1:9],na.rm = T) Xci <- plyr::laply(FORECAST[t1:t], function(x) { - quantile(x[, i], c(0.025, 0.975),na.rm = T) }) + stats::quantile(x[, i], c(0.025, 0.975),na.rm = T) }) Xci[is.na(Xci)]<-0 @@ -188,7 +203,7 @@ postana.timeser.plotting.sda<-function(settings, t, obs.times, obs.mean, obs.cov mean(x[, i],na.rm = T) }) XaCI <- plyr::laply(ANALYSIS[t1:t], function(x) { - quantile(x[, i], c(0.025, 0.975),na.rm = T )}) + stats::quantile(x[, i], c(0.025, 0.975),na.rm = T )}) Xa <- Xa XaCI <- XaCI @@ -203,28 +218,33 @@ postana.timeser.plotting.sda<-function(settings, t, obs.times, obs.mean, obs.cov # observation / data if (i<=ncol(X)) { # - ciEnvelope(as.Date(obs.times[t1:t]), + PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), as.numeric(Ybar[, i]) - as.numeric(YCI[, i]) * 1.96, as.numeric(Ybar[, i]) + as.numeric(YCI[, i]) * 1.96, - col = alphagreen) - lines(as.Date(obs.times[t1:t]), + col = sda_colors$green) + graphics::lines(as.Date(obs.times[t1:t]), as.numeric(Ybar[, i]), type = "l", col = "darkgreen", lwd = 2) } # forecast - ciEnvelope(as.Date(obs.times[t1:t]), Xci[, 1], Xci[, 2], col = alphablue) #col='lightblue') #alphablue - lines(as.Date(obs.times[t1:t]), Xbar, col = "darkblue", type = "l", lwd = 2) #"darkblue" + PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), Xci[, 1], Xci[, 2], col = sda_colors$blue) #col='lightblue') #alphablue + graphics::lines(as.Date(obs.times[t1:t]), Xbar, col = "darkblue", type = "l", lwd = 2) #"darkblue" # analysis - ciEnvelope(as.Date(obs.times[t1:t]), XaCI[, 1], XaCI[, 2], col = alphapink) #alphapink - lines(as.Date(obs.times[t1:t]), Xa, col = "black", lty = 2, lwd = 2) #"black" + PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), XaCI[, 1], XaCI[, 2], col = sda_colors$pink) #alphapink + graphics::lines(as.Date(obs.times[t1:t]), Xa, col = "black", lty = 2, lwd = 2) #"black" - legend('topright',c('Forecast','Data','Analysis'),col=c(alphablue,alphagreen,alphapink),lty=1,lwd=5) + graphics::legend( + 'topright', + c('Forecast', 'Data', 'Analysis'), + col=c(sda_colors$blue, sda_colors$green, sda_colors$pink), + lty=1, + lwd=5) } - dev.off() + grDevices::dev.off() } @@ -239,9 +259,15 @@ 
postana.bias.plotting.sda<-function(settings, t, obs.times, obs.mean, obs.cov, o
       "needed by `PEcAnAssimSequential::postana.bias.plotting.sda()`.",
       "Please install it and try again.")
   }
+  if (!requireNamespace("PEcAn.visualization", quietly = TRUE)) {
+    PEcAn.logger::logger.error(
+      "Can't find package 'PEcAn.visualization',",
+      "needed by `PEcAnAssimSequential::postana.bias.plotting.sda()`.",
+      "Please install it and try again.")
+  }
   #Defining some colors
-  generate_colors_sda()
+  sda_colors <- generate_colors_sda()
   t1 <- 1
   ylab.names <- unlist(sapply(settings$state.data.assimilation$state.variable, function(x) { x })[2, ], use.names = FALSE)
@@ -254,16 +280,16 @@ postana.bias.plotting.sda<-function(settings, t, obs.times, obs.mean, obs.cov, o
     tmp
   }))
   #----
-  pdf(file.path(settings$outdir,"SDA", "bias.diagnostic.pdf"))
+  grDevices::pdf(file.path(settings$outdir,"SDA", "bias.diagnostic.pdf"))
   for (i in seq_along(obs.mean[[1]])) {
     Xbar <- plyr::laply(FORECAST[t1:t], function(x) { mean(x[, i], na.rm = TRUE) })
-    Xci <- plyr::laply(FORECAST[t1:t], function(x) { quantile(x[, i], c(0.025, 0.975)) })
+    Xci <- plyr::laply(FORECAST[t1:t], function(x) { stats::quantile(x[, i], c(0.025, 0.975)) })
     Xa <- plyr::laply(ANALYSIS[t1:t], function(x) { mean(x[, i], na.rm = TRUE) })
-    XaCI <- plyr::laply(ANALYSIS[t1:t], function(x) { quantile(x[, i], c(0.025, 0.975)) })
+    XaCI <- plyr::laply(ANALYSIS[t1:t], function(x) { stats::quantile(x[, i], c(0.025, 0.975)) })
     if(length(which(is.na(Ybar[,i])))>=length(t1:t)) next()
-    reg <- lm(Xbar[t1:t] - unlist(Ybar[, i]) ~ c(t1:t))
+    reg <- stats::lm(Xbar[t1:t] - unlist(Ybar[, i]) ~ c(t1:t))
     plot(t1:t,
          Xbar - unlist(Ybar[, i]),
          pch = 16, cex = 1,
@@ -271,18 +297,19 @@ postana.bias.plotting.sda<-function(settings, t, obs.times, obs.mean, obs.cov, o
          xlab = "Time",
          ylab = "Error",
         main = paste(colnames(X)[i], " Error = Forecast - Data"))
-    ciEnvelope(rev(t1:t),
+    PEcAn.visualization::ciEnvelope(rev(t1:t),
                rev(Xci[, 1] - unlist(Ybar[, i])),
                rev(Xci[, 2] - unlist(Ybar[, i])),
-               col = alphabrown)
-    abline(h = 0, lty = 2, lwd = 2)
-    abline(reg)
-    mtext(paste("slope =", signif(summary(reg)$coefficients[2], digits = 3),
-                "intercept =", signif(summary(reg)$coefficients[1], digits = 3)))
+               col = sda_colors$brown)
+    graphics::abline(h = 0, lty = 2, lwd = 2)
+    graphics::abline(reg)
+    graphics::mtext(paste(
+      "slope =", signif(summary(reg)$coefficients[2], digits = 3),
+      "intercept =", signif(summary(reg)$coefficients[1], digits = 3)))
     # d<-density(c(Xbar[t1:t] - unlist(Ybar[t1:t,i]))) lines(d$y+1,d$x)
     # forecast minus analysis = update
-    reg1 <- lm(Xbar - Xa ~ c(t1:t))
+    reg1 <- stats::lm(Xbar - Xa ~ c(t1:t))
    plot(t1:t,
         Xbar - Xa,
         pch = 16, cex = 1,
@@ -290,44 +317,46 @@ postana.bias.plotting.sda<-function(settings, t, obs.times, obs.mean, obs.cov, o
         xlab = "Time",
         ylab = "Update",
         main = paste(colnames(X)[i], "Update = Forecast - Analysis"))
-    ciEnvelope(rev(t1:t),
+    PEcAn.visualization::ciEnvelope(rev(t1:t),
                rev(Xbar - XaCI[, 1]),
                rev(Xbar - XaCI[, 2]),
-               col = alphapurple)
-    abline(h = 0, lty = 2, lwd = 2)
-    abline(reg1)
-    mtext(paste("slope =", signif(summary(reg1)$coefficients[2], digits = 3),
-                "intercept =", signif(summary(reg1)$coefficients[1],
-                                      digits = 3)))
+               col = sda_colors$purple)
+    graphics::abline(h = 0, lty = 2, lwd = 2)
+    graphics::abline(reg1)
+    graphics::mtext(paste(
+      "slope =", signif(summary(reg1)$coefficients[2], digits = 3),
+      "intercept =", signif(summary(reg1)$coefficients[1],
+                            digits = 3)))
     # d<-density(c(Xbar[t1:t] - Xa[t1:t])) lines(d$y+1,d$x)
   }
-
dev.off() + grDevices::dev.off() } ##' @rdname interactive.plotting.sda +#' @param aqq,bqq shape parameters estimated over time for the process covariance ##' @export postana.bias.plotting.sda.corr<-function(t, obs.times, X, aqq, bqq){ t1<- 1 #Defining some colors - generate_colors_sda() + sda_colors <- generate_colors_sda() #--- - pdf('SDA/process.var.plots.pdf') + grDevices::pdf('SDA/process.var.plots.pdf') - cor.mat <- cov2cor(aqq[t,,] / bqq[t]) + cor.mat <- stats::cov2cor(aqq[t,,] / bqq[t]) colnames(cor.mat) <- colnames(X) rownames(cor.mat) <- colnames(X) - par(mfrow = c(1, 1), mai = c(1, 1, 4, 1)) + graphics::par(mfrow = c(1, 1), mai = c(1, 1, 4, 1)) corrplot::corrplot(cor.mat, type = "upper", tl.srt = 45,order='FPC') - par(mfrow=c(1,1)) + graphics::par(mfrow=c(1,1)) plot(as.Date(obs.times[t1:t]), bqq[t1:t], pch = 16, cex = 1, ylab = "Degrees of Freedom", xlab = "Time") - dev.off() + grDevices::dev.off() } ##' @rdname interactive.plotting.sda @@ -338,7 +367,7 @@ post.analysis.ggplot <- function(settings, t, obs.times, obs.mean, obs.cov, obs, t1 <- 1 #Defining some colors ready.OBS<-NULL - generate_colors_sda() + sda_colors <- generate_colors_sda() var.names <- sapply(settings$state.data.assimilation$state.variable, '[[', "variable.name") #---- #Analysis & Forcast cleaning and STAT @@ -348,11 +377,11 @@ post.analysis.ggplot <- function(settings, t, obs.times, obs.mean, obs.cov, obs, c("FORECAST", "ANALYSIS") %>% purrr::map_df(function(listFA) { All.my.data[[listFA]] %>% purrr::map_df(function(state.vars) { means <- apply(state.vars, 2, mean, na.rm = T) - CI <- apply(state.vars, 2, quantile, c(0.025, 0.975), + CI <- apply(state.vars, 2, stats::quantile, c(0.025, 0.975), na.rm = T) - rbind(means, CI) %>% t %>% as.data.frame() %>% mutate(Variables = paste(colnames(state.vars))) %>% + rbind(means, CI) %>% t %>% as.data.frame() %>% dplyr::mutate(Variables = paste(colnames(state.vars))) %>% tidyr::replace_na(list(0)) - }) %>% mutate( + }) %>% dplyr::mutate( Type = listFA, Date = rep( lubridate::ymd_hms(obs.times[t1:t], truncated = 3, tz = "EST"), @@ -370,8 +399,8 @@ post.analysis.ggplot <- function(settings, t, obs.times, obs.mean, obs.cov, obs, tryCatch({ ready.OBS<- names(obs.mean)%>% purrr::map(~c(obs.mean[.x],obs.cov[.x],.x)%>% - setNames(c('means','covs','Date')))%>% - setNames(names(obs.mean))%>% + stats::setNames(c('means','covs','Date')))%>% + stats::setNames(names(obs.mean))%>% purrr::map_df(function(one.day.data){ #CI @@ -380,9 +409,9 @@ post.analysis.ggplot <- function(settings, t, obs.times, obs.mean, obs.cov, obs, data.frame(mean-(sd*1.96), mean+(sd*1.96)) })%>% - mutate(Variables=names(one.day.data$means))%>% - `colnames<-`(c('2.5%','97.5%','Variables'))%>% - mutate(means=one.day.data$means%>%unlist, + dplyr::mutate(Variables=names(one.day.data$means))%>% + `colnames<-`(c('2.5%', '97.5%', 'Variables'))%>% + dplyr::mutate(means=one.day.data$means%>%unlist, Type="Data", Date=one.day.data$Date%>%as.POSIXct(tz="EST")) @@ -395,11 +424,11 @@ post.analysis.ggplot <- function(settings, t, obs.times, obs.mean, obs.cov, obs, ) ready.to.plot <- ready.OBS %>% - bind_rows(ready.FA) + dplyr::bind_rows(ready.FA) #Adding the units to the variables ready.to.plot$Variable %>% unique() %>% - walk(function(varin){ + purrr::walk(function(varin){ #find the unit unitp <- which(lapply(settings$state.data.assimilation$state.variable, "[", 'variable.name') %>% unlist %in% varin) if (length(unitp)>0) { @@ -413,25 +442,28 @@ post.analysis.ggplot <- function(settings, t, obs.times, obs.mean, obs.cov, obs, 
- p<-ready.to.plot%>% - ggplot2::ggplot(aes(x=Date))+ - geom_ribbon(aes(ymin=`2.5%`,ymax=`97.5%`,fill=Type),color="black")+ - geom_line(aes(y=means, color=Type),lwd=1.02,linetype=2)+ - geom_point(aes(y=means, color=Type),size=3,alpha=0.75)+ - scale_fill_manual(values = c(alphapink,alphagreen,alphablue),name="")+ - scale_color_manual(values = c(alphapink,alphagreen,alphablue),name="")+ - theme_bw(base_size = 17)+ - facet_wrap(~Variables, scales = "free", ncol=2)+ - theme(legend.position = "top", - strip.background = element_blank())->p - if (!is.null(plot.title)) p <- p + labs(title=plot.title) + p <- ready.to.plot %>% + ggplot2::ggplot(ggplot2::aes(x = Date)) + + ggplot2::geom_ribbon( + ggplot2::aes(ymin = .data$`2.5%`, ymax = .data$`97.5%`, fill = .data$Type), + color = "black") + + ggplot2::geom_line(ggplot2::aes(y = .data$means, color = .data$Type), lwd = 1.02, linetype = 2) + + ggplot2::geom_point(ggplot2::aes(y = .data$means, color = .data$Type), size = 3, alpha = 0.75) + + ggplot2::scale_fill_manual(values = c(sda_colors$pink, sda_colors$green, sda_colors$blue), name = "") + + ggplot2::scale_color_manual(values = c(sda_colors$pink, sda_colors$green, sda_colors$blue), name = "") + + ggplot2::theme_bw(base_size = 17) + + ggplot2::facet_wrap(~.data$Variables, scales = "free", ncol = 2) + + ggplot2::theme(legend.position = "top", strip.background = ggplot2::element_blank()) + if (!is.null(plot.title)) { + p <- p + ggplot2::labs(title = plot.title) + } - pdf("SDA/SDA.pdf", width = 14, height = 10, onefile = TRUE) + grDevices::pdf("SDA/SDA.pdf", width = 14, height = 10, onefile = TRUE) print(p) - dev.off() + grDevices::dev.off() #saving plot data save(p, ready.to.plot, file = file.path(settings$outdir,"SDA", "timeseries.plot.data.Rdata")) @@ -445,7 +477,7 @@ post.analysis.ggplot.violin <- function(settings, t, obs.times, obs.mean, obs.co t1 <- 1 #Defining some colors - generate_colors_sda() + sda_colors <- generate_colors_sda() var.names <- sapply(settings$state.data.assimilation$state.variable, '[[', "variable.name") #rearranging the forcast and analysis data @@ -457,18 +489,18 @@ post.analysis.ggplot.violin <- function(settings, t, obs.times, obs.mean, obs.co All.my.data[[listFA]]%>% purrr::map_df(function(state.vars){ state.vars%>%as.data.frame() - })%>%mutate(Type=listFA, + })%>%dplyr::mutate(Type=listFA, Date=rep(obs.times[t1:t], each=((All.my.data[[listFA]])[[1]]) %>% nrow()) ) })%>% - tidyr::gather(Variables, Value, -c(Type,Date)) + tidyr::gather(key = "Variables", value = "Value", -c("Type", "Date")) #Observed data #first merging mean and conv based on the day obs.df <- names(obs.mean)%>% purrr::map(~c(obs.mean[.x], obs.cov[.x], .x)%>% - setNames(c('means','covs','Date')))%>% - setNames(names(obs.mean))%>% + stats::setNames(c('means','covs','Date')))%>% + stats::setNames(names(obs.mean))%>% purrr::map_df(function(one.day.data){ #CI purrr::map2_df(sqrt(one.day.data$covs %>% purrr::map( ~ diag(.x)) %>% unlist), one.day.data$means, @@ -476,9 +508,9 @@ post.analysis.ggplot.violin <- function(settings, t, obs.times, obs.mean, obs.co data.frame(mean-(sd*1.96), mean+(sd*1.96)) })%>% - mutate(Variables=names(one.day.data$means))%>% - `colnames<-`(c('2.5%','97.5%','Variables'))%>% - mutate(means=one.day.data$means%>%unlist, + dplyr::mutate(Variables=names(one.day.data$means)) %>% + `colnames<-`(c('2.5%', '97.5%', 'Variables')) %>% + dplyr::mutate(means=one.day.data$means %>% unlist, Type="Data", Date=one.day.data$Date%>%as.POSIXct(tz="UTC")) @@ -488,7 +520,7 @@ post.analysis.ggplot.violin 
<- function(settings, t, obs.times, obs.mean, obs.co #Adding the units to the variables ready.FA$Variable %>% unique() %>% - walk(function(varin){ + purrr::walk(function(varin){ #find the unit unitp <- which(lapply(settings$state.data.assimilation$state.variable, "[", 'variable.name') %>% unlist %in% varin) if (length(unitp)>0) { @@ -501,26 +533,36 @@ post.analysis.ggplot.violin <- function(settings, t, obs.times, obs.mean, obs.co }) - p<-ready.FA%>% -# filter(Variables==vari)%>% - ggplot2::ggplot(aes(Date,Value))+ - geom_ribbon(aes(x=Date,y=means,ymin=`2.5%`,ymax=`97.5%`,fill=Type), data=obs.df, color="black")+ - geom_line(aes(y=means, color=Type),data=obs.df,lwd=1.02,linetype=2)+ - geom_violin(aes(x=Date,fill=Type,group=interaction(Date,Type)), position = position_dodge(width=0.9))+ - geom_jitter(aes(color=Type), position=position_jitterdodge(dodge.width=0.9))+ - scale_fill_manual(values = c(alphapink,alphagreen,alphablue))+ - scale_color_manual(values = c(alphapink,alphagreen,alphablue))+ - facet_wrap(~Variables, scales = "free", ncol=2)+ - theme_bw(base_size = 17)+ - # labs(y=paste(vari,'(',unit,')'))+ - theme(legend.position = "top", - strip.background = element_blank()) - if (!is.null(plot.title)) p <- p + labs(title=plot.title) - - - pdf("SDA/SDA.Violin.pdf", width = 14, height = 10, onefile = TRUE) + p <- ready.FA %>% + ggplot2::ggplot(ggplot2::aes(.data$Date, .data$Value)) + + ggplot2::geom_ribbon( + ggplot2::aes(x = .data$Date, y = .data$means, ymin = .data$`2.5%`, ymax = .data$`97.5%`, fill = .data$Type), + data = obs.df, + color = "black") + + ggplot2::geom_line( + ggplot2::aes(y = .data$means, color = .data$Type), + data = obs.df, + lwd = 1.02, + linetype = 2) + + ggplot2::geom_violin( + ggplot2::aes(x = .data$Date, fill = .data$Type, group = interaction(.data$Date, .data$Type)), + position = ggplot2::position_dodge(width = 0.9)) + + ggplot2::geom_jitter( + ggplot2::aes(color = .data$Type), + position = ggplot2::position_jitterdodge(dodge.width = 0.9)) + + ggplot2::scale_fill_manual(values = c(sda_colors$pink, sda_colors$green, sda_colors$blue)) + + ggplot2::scale_color_manual(values = c(sda_colors$pink, sda_colors$green, sda_colors$blue)) + + ggplot2::facet_wrap(~.data$Variables, scales = "free", ncol = 2) + + ggplot2::theme_bw(base_size = 17) + + ggplot2::theme(legend.position = "top", strip.background = ggplot2::element_blank()) + if (!is.null(plot.title)) { + p <- p + ggplot2::labs(title = plot.title) + } + + + grDevices::pdf("SDA/SDA.Violin.pdf", width = 14, height = 10, onefile = TRUE) print(p) - dev.off() + grDevices::dev.off() #saving plot data save(p, ready.FA, obs.df, file = file.path(settings$outdir,"SDA", "timeseries.violin.plot.data.Rdata")) @@ -528,6 +570,8 @@ post.analysis.ggplot.violin <- function(settings, t, obs.times, obs.mean, obs.co } ##' @rdname interactive.plotting.sda +#' @param facetg logical: Create a subpanel for each variable? 
+#' @param readsFF optional forward forecast ##' @export post.analysis.multisite.ggplot <- function(settings, t, obs.times, obs.mean, obs.cov, FORECAST, ANALYSIS, plot.title=NULL, facetg=FALSE, readsFF=NULL, Add_Map=FALSE){ @@ -587,7 +631,7 @@ post.analysis.multisite.ggplot <- function(settings, t, obs.times, obs.mean, obs #Defining some colors t1 <- 1 - generate_colors_sda() + sda_colors <- generate_colors_sda() varnames <- settings$state.data.assimilation$state.variable #just a check if (is.null(varnames)) varnames <- settings[[1]]$state.data.assimilation$state.variable @@ -597,7 +641,7 @@ post.analysis.multisite.ggplot <- function(settings, t, obs.times, obs.mean, obs var.names <- sapply(settings$state.data.assimilation$state.variable, '[[', "variable.name") site.ids <- attr(FORECAST[[1]], 'Site') - site.names <- settings %>% map(~.x[['run']] ) %>% map('site') %>% map('name') %>% unlist() %>% as.character() + site.names <- settings %>% purrr::map(~.x[['run']] ) %>% purrr::map('site') %>% purrr::map('name') %>% unlist() %>% as.character() #------------------------------------------------Data prepration #Analysis & Forcast cleaning and STAT @@ -610,18 +654,18 @@ post.analysis.multisite.ggplot <- function(settings, t, obs.times, obs.mean, obs #finding the mean and Ci for all the state variables site.ids %>% unique() %>% - map_df(function(site){ + purrr::map_df(function(site){ (state.vars)[,which(site.ids %in% site)] %>% as.data.frame %>% - mutate(Site=site) + dplyr::mutate(Site=site) }) %>% - tidyr::gather(Variable, Value, -c(Site)) %>% - group_by(Site,Variable) %>% - summarise( - Means=mean(Value, na.rm=T), - Lower=quantile(Value,0.025, na.rm=T), - Upper = quantile(Value, 0.975, na.rm = TRUE)) - }) %>% mutate(Type = paste0("SDA_", listFA), + tidyr::gather(key = "Variable", value = "Value", -c("Site")) %>% + dplyr::group_by(.data$Site,.data$Variable) %>% + dplyr::summarise( + Means = mean(.data$Value, na.rm = TRUE), + Lower = stats::quantile(.data$Value,0.025, na.rm = TRUE), + Upper = stats::quantile(.data$Value, 0.975, na.rm = TRUE)) + }) %>% dplyr::mutate(Type = paste0("SDA_", listFA), Date = rep(as.Date(names(FORECAST)), each = colnames((All.my.data[[listFA]])[[1]]) %>% length() / length(unique(site.ids))) %>% as.POSIXct() ) @@ -633,53 +677,54 @@ post.analysis.multisite.ggplot <- function(settings, t, obs.times, obs.mean, obs #first merging mean and conv based on the day ready.to.plot <- names(obs.mean)%>% purrr::map(~c(obs.mean[.x],obs.cov[.x],.x)%>% - setNames(c('means','covs','Date')))%>% - setNames(names(obs.mean))%>% + stats::setNames(c('means','covs','Date')))%>% + stats::setNames(names(obs.mean))%>% purrr::map_df(function(one.day.data){ one.day.data$means %>% - map_dfr(~.x) %>% - mutate(Site=names(one.day.data$means)) %>% - tidyr::gather(Variable,Means,-c(Site)) %>% - right_join(one.day.data$covs %>% - map_dfr(~ t(sqrt(as.numeric(diag_fix(.x)))) %>% + purrr::map_dfr(~.x) %>% + dplyr::mutate(Site = names(one.day.data$means)) %>% + tidyr::gather(key = "Variable", value = "Means", -c("Site")) %>% + dplyr::right_join(one.day.data$covs %>% + purrr::map_dfr(~ t(sqrt(as.numeric(diag_fix(.x)))) %>% data.frame %>% `colnames<-`(c(obs.var.names))) %>% - mutate(Site=names(one.day.data$covs)) %>% - tidyr::gather(Variable,Sd,-c(Site)), - by=c('Site','Variable')) %>% - mutate(Upper=Means+(Sd*1.96), - Lower=Means-(Sd*1.96))%>% + dplyr::mutate(Site = names(one.day.data$covs)) %>% + tidyr::gather(key = "Variable", value = "Sd", -c("Site")), + by = c('Site', 'Variable')) %>% + dplyr::mutate( + 
Upper = .data$Means + (.data$Sd*1.96), + Lower = .data$Means - (.data$Sd*1.96)) %>% # dropped the "_" from "SDA_Data" - mutate(Type="Data", + dplyr::mutate(Type="Data", Date=one.day.data$Date %>% as.POSIXct()) # mutate(Type="SDA_Data", # Date=one.day.data$Date %>% as.POSIXct()) })%>% - dplyr::select(-Sd) %>% - bind_rows(ready.FA) + dplyr::select(-.data$Sd) %>% + dplyr::bind_rows(ready.FA) #--- Adding the forward forecast if (!is.null(readsFF)){ readsFF.df<-readsFF %>% - map_df(function(siteX){ + purrr::map_df(function(siteX){ - siteX %>% map_df(function(DateX){ + siteX %>% purrr::map_df(function(DateX){ DateX %>% - map_df(~.x %>% t ) %>% - tidyr::gather(Variable, Value,-c(Date, Site)) %>% - group_by(Variable,Date, Site) %>% - summarise( - Means=mean(Value, na.rm=T), - Lower=quantile(Value,0.025, na.rm=T), - Upper=quantile(Value,0.975, na.rm=T)) %>% - mutate(Type="ForwardForecast") + purrr::map_df(~.x %>% t ) %>% + tidyr::gather(key = "Variable", value = "Value", -c("Date", "Site")) %>% + dplyr::group_by(.data$Variable,.data$Date, .data$Site) %>% + dplyr::summarise( + Means = mean(.data$Value, na.rm = TRUE), + Lower = stats::quantile(.data$Value, 0.025, na.rm = TRUE), + Upper = stats::quantile(.data$Value, 0.975, na.rm = TRUE)) %>% + dplyr::mutate(Type="ForwardForecast") }) }) ready.to.plot <- ready.to.plot %>% - bind_rows(readsFF.df) + dplyr::bind_rows(readsFF.df) } @@ -688,7 +733,7 @@ post.analysis.multisite.ggplot <- function(settings, t, obs.times, obs.mean, obs #Adding the units to the variables ready.to.plot$Variable %>% unique() %>% - walk(function(varin){ + purrr::walk(function(varin){ #find the unit unitp <- which(lapply(settings$state.data.assimilation$state.variable, "[", 'variable.name') %>% unlist %in% varin) if (length(unitp)>0) { @@ -705,25 +750,32 @@ post.analysis.multisite.ggplot <- function(settings, t, obs.times, obs.mean, obs filew <- 14 fileh <- 10 #for each site and for each variable - all.plots<-ready.to.plot$Site%>%unique() %>% + all.plots <- ready.to.plot$Site%>%unique() %>% purrr::map(function(site){ #plotting - ready.to.plot%>% - filter(Site==site)%>% - ggplot2::ggplot(aes(x=Date))+ - geom_ribbon(aes(ymin=Lower,ymax=Upper,fill=Type),color="black")+ - geom_line(aes(y=Means, color=Type),lwd=1.02,linetype=2)+ - geom_point(aes(y=Means, color=Type),size=3,alpha=0.75)+ - scale_fill_manual(values = c(alphabrown,alphapink,alphagreen,alphablue),name="")+ - scale_color_manual(values = c(alphabrown,alphapink,alphagreen,alphablue),name="")+ - theme_bw(base_size = 17)+ - labs(y="", subtitle=paste0("Site id: ",site))+ - theme(legend.position = "top", - strip.background = element_blank())->p - if (!is.null(plot.title)) p <- p + labs(title=plot.title) - p <- p + facet_wrap(~Variable, ncol=2, scales = "free_y") + p <- ready.to.plot %>% + dplyr::filter(.data$Site == site) %>% + ggplot2::ggplot(ggplot2::aes(x = Date)) + + ggplot2::geom_ribbon( + ggplot2::aes(ymin = .data$Lower, ymax = .data$Upper, fill = .data$Type), + color = "black") + + ggplot2::geom_line(ggplot2::aes(y = .data$Means, color = .data$Type), lwd = 1.02, linetype = 2) + + ggplot2::geom_point(ggplot2::aes(y = .data$Means, color = .data$Type), size = 3, alpha = 0.75) + + ggplot2::scale_fill_manual( + values = c(sda_colors$brown, sda_colors$pink, sda_colors$green, sda_colors$blue), + name = "") + + ggplot2::scale_color_manual( + values = c(sda_colors$brown, sda_colors$pink, sda_colors$green, sda_colors$blue), + name = "") + + ggplot2::theme_bw(base_size = 17) + + ggplot2::labs(y = "", subtitle=paste0("Site id: ",site)) 
+          + ggplot2::theme(legend.position = "top", strip.background = ggplot2::element_blank())
+        if (!is.null(plot.title)) {
+          p <- p + ggplot2::labs(title=plot.title)
+        }
+        p <- p + ggplot2::facet_wrap(~.data$Variable, ncol = 2, scales = "free_y")
+        list(p)
-      })
     }else{
@@ -741,19 +793,27 @@ post.analysis.multisite.ggplot <- function(settings, t, obs.times, obs.mean, obs
           unitp <- which(lapply(settings$state.data.assimilation$state.variable, "[", 'variable.name') %>% unlist %in% varin)
           if (length(unitp)>0) unit <- settings$state.data.assimilation$state.variable[[unitp]]$unit
           #plotting
-          ready.to.plot%>%
-            filter(Variable==vari, Site==site)%>%
-            ggplot2::ggplot(aes(x=Date))+
-            geom_ribbon(aes(ymin=Lower,ymax=Upper,fill=Type),color="black")+
-            geom_line(aes(y=Means, color=Type),lwd=1.02,linetype=2)+
-            geom_point(aes(y=Means, color=Type),size=3,alpha=0.75)+
-            scale_fill_manual(values = c(alphabrown,alphapink,alphagreen,alphablue),name="")+
-            scale_color_manual(values = c(alphabrown,alphapink,alphagreen,alphablue),name="")+
-            theme_bw(base_size = 17)+
-            labs(y=paste(vari,'(',unit,')'), subtitle=paste0("Site id: ",site))+
-            theme(legend.position = "top",
-                  strip.background = element_blank())->p
-          if (!is.null(plot.title)) p <- p + labs(title=plot.title)
+          p <- ready.to.plot %>%
+            dplyr::filter(.data$Variable == vari, .data$Site == site) %>%
+            ggplot2::ggplot(ggplot2::aes(x = Date)) +
+            ggplot2::geom_ribbon(
+              ggplot2::aes(ymin = .data$Lower, ymax = .data$Upper, fill = .data$Type),
+              color = "black") +
+            ggplot2::geom_line(ggplot2::aes(y = .data$Means, color = .data$Type), lwd = 1.02, linetype = 2) +
+            ggplot2::geom_point(ggplot2::aes(y = .data$Means, color = .data$Type), size = 3, alpha = 0.75) +
+            ggplot2::scale_fill_manual(
+              values = c(sda_colors$brown, sda_colors$pink, sda_colors$green, sda_colors$blue),
+              name = "") +
+            ggplot2::scale_color_manual(
+              values = c(sda_colors$brown, sda_colors$pink, sda_colors$green, sda_colors$blue),
+              name = "") +
+            ggplot2::theme_bw(base_size = 17) +
+            ggplot2::labs(y = paste(vari,'(',unit,')'), subtitle = paste0("Site id: ",site)) +
+            ggplot2::theme(legend.position = "top", strip.background = ggplot2::element_blank())
+          if (!is.null(plot.title)) {
+            p <- p + ggplot2::labs(title=plot.title)
+          }
+          p
+        })
     })
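# Why this file switched to the rlang .data pronoun (a general tidyverse
# pattern, sketched here with a toy data frame, not PEcAn-specific): .data
# pins column references to the piped data frame, which also silences
# R CMD check's "no visible binding for global variable" notes.
library(dplyr)
df <- data.frame(Site = c("A", "A", "B"), Means = 1:3)
df %>%
  filter(.data$Site == "A") %>%            # Site is unambiguously a column
  mutate(Upper = .data$Means + 1.96)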
@@ -769,17 +829,17 @@ post.analysis.multisite.ggplot <- function(settings, t, obs.times, obs.mean, obs
       t %>%
       as.data.frame()%>%
       `colnames<-`(c("Lon","Lat")) %>%
-      dplyr::mutate(Site=site.ids %>% unique(),
-                    Name=site.names)
+      dplyr::mutate(Site = site.ids %>% unique(),
+                    Name = site.names)
     suppressMessages({
-      aoi_boundary_HARV <- sf::st_read(system.file("extdata", "eco-regionl2.json", package = "PEcAnAssimSequential"))
+      aoi_boundary_HARV <- sf::st_read(system.file("extdata", "eco-regionl2.json", package = "PEcAn.data.land"))
     })
     #transform site locs into new projection - UTM 2163
     site.locs.sp<-site.locs
-    coordinates(site.locs.sp) <- c("Lon", "Lat")
-    proj4string(site.locs.sp) <- sp::CRS("+proj=longlat +datum=WGS84")  ## for example
+    sp::coordinates(site.locs.sp) <- c("Lon", "Lat")
+    sp::proj4string(site.locs.sp) <- sp::CRS("+proj=longlat +datum=WGS84")  ## for example
     res <- sp::spTransform(site.locs.sp, sp::CRS("+proj=laea +lat_0=45 +lon_0=-100 +x_0=0 +y_0=0 +a=6370997 +b=6370997 +units=m +no_defs"))
     site.locs[,c(1,2)] <-res@coords
@@ -788,28 +848,31 @@ post.analysis.multisite.ggplot <- function(settings, t, obs.times, obs.mean, obs
       obs.mean %>% purrr::map(names) %>% unlist() %>% as.character() %>% unique()
     #adding the column to site
     site.locs <- site.locs %>%
-      dplyr::mutate(Data = Site %in% sites.w.data)
+      dplyr::mutate(Data = .data$Site %in% sites.w.data)
     #plotting
     map.plot<- ggplot2::ggplot() +
-      geom_sf(aes(fill=NA_L1CODE),data = aoi_boundary_HARV, alpha=0.35,lwd=0,color="black")+
-      geom_point(data = site.locs,
-                 aes(x = Lon, y = Lat),
-                 size = 2) +
+      ggplot2::geom_sf(
+        ggplot2::aes(fill = .data$NA_L1CODE),
+        data = aoi_boundary_HARV,
+        alpha=0.35,
+        lwd=0,
+        color="black") +
+      ggplot2::geom_point(data = site.locs, ggplot2::aes(x = .data$Lon, y = .data$Lat), size = 2) +
       ggrepel::geom_label_repel(
        data = site.locs,
-        aes(
-          x = Lon,
-          y = Lat,
-          label = paste0(Site, "\n", Name),
-          color = Data,
+        ggplot2::aes(
+          x = .data$Lon,
+          y = .data$Lat,
+          label = paste0(.data$Site, "\n", .data$Name),
+          color = .data$Data
        ),
        vjust = 1.2,
        fontface = "bold",
        size = 3.5
      ) +
      #coord_sf(datum = sf::st_crs(2163),default = F)+
-      scale_fill_manual(values = c("#a6cee3",
+      ggplot2::scale_fill_manual(values = c("#a6cee3",
                          "#1f78b4","#b2df8a",
                          "#33a02c","#fb9a99",
                          "#e31a1c","#fdbf6f",
@@ -820,21 +883,21 @@ post.analysis.multisite.ggplot <- function(settings, t, obs.times, obs.mean, obs
                          "#ffd92f","#8dd3c7",
                          "#80b1d3","#d9d9d9",
                          "#fdbf6f"),name="Eco-Region")+
-      scale_color_manual(values= c("#e31a1c","#33a02c"))+
-      theme_minimal()+
-      theme(axis.text = element_blank())
+      ggplot2::scale_color_manual(values= c("#e31a1c","#33a02c"))+
+      ggplot2::theme_minimal()+
+      ggplot2::theme(axis.text = ggplot2::element_blank())
     #----- Reordering the plots
     all.plots.print <-list(map.plot)
     for (i in seq_along(all.plots)) all.plots.print <-c(all.plots.print,all.plots[[i]])
-    pdf(paste0(settings$outdir,"/SDA.pdf"),width = filew, height = fileh)
+    grDevices::pdf(paste0(settings$outdir,"/SDA.pdf"),width = filew, height = fileh)
     all.plots.print %>% purrr::map(~print(.x))
-    dev.off()
+    grDevices::dev.off()
   }else{
-    pdf(paste0(settings$outdir,"/SDA.pdf"),width = filew, height = fileh)
+    grDevices::pdf(paste0(settings$outdir,"/SDA.pdf"),width = filew, height = fileh)
     all.plots %>% purrr::map(~print(.x))
-    dev.off()
+    grDevices::dev.off()
   }
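# Hypothetical usage of the SDA_timeseries_plot() function added in the next
# hunk, after an SDA run has written sda.output.Rdata (which, per the roxygen
# below, provides the ANALYSIS and FORECAST objects; paths are illustrative):
load(file.path(settings$outdir, "SDA", "sda.output.Rdata"))
SDA_timeseries_plot(ANALYSIS, FORECAST, obs.mean, obs.cov,
                    outdir = settings$outdir, by = "var",
                    types = c("FORECAST", "ANALYSIS", "OBS"))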
+ if("try-error" %in% class(try(find.package("ggpubr"), silent = T))){ + PEcAn.logger::logger.info("Package ggpubr is not installed! Please install it and rerun the function!") + return(0) + } + #TODO: make page, font, line, point sizes adjustable. + time_points <- names(FORECAST) + if (!is.null(t.inds)) { + time_points <- time_points[t.inds] + } + site_ids <- attributes(FORECAST[[1]])$Site + var_names <- attributes(FORECAST[[1]])$dimnames[[2]] + #new diag function: fixed the bug when length==1 then it will return 0x0 matrix + diag_fix <- function(vector){ + if (length(vector)>1){ + return(diag(vector)) + }else if (length(vector)==1){ + return(vector) + } + } + #read pft.csv file for the option by == pft. + if(!is.null(pft.path)){ + pft <- utils::read.csv(pft.path) + } + #create database + DB <- data.frame() + for (id in sort(unique(site_ids))) { + for (time_point in time_points) { + for (var_name in sort(unique(var_names))) { + for (type in types) { + if(type == "OBS") { + obs_mean <- obs.mean[[time_point]][[id]][[var_name]] + if(length(obs_mean) == 0 | is.null(obs_mean)){ + next + }else{ + obs_cov <- diag_fix(obs.cov[[time_point]][[id]])[which(var_name == names(obs.mean[[time_point]][[id]]))] + MIN <- obs_mean - 1.96*sqrt(obs_cov) + MAX <- obs_mean + 1.96*sqrt(obs_cov) + MEAN <- obs_mean + } + } else { + temp_Dat <- get(type)[[time_point]] + site_ind <- which(id == site_ids) + var_ind <- which(var_name == var_names) + ind <- var_ind[which(var_ind %in% site_ind)] + MEAN <- mean(temp_Dat[,ind]) + MIN <- stats::quantile(temp_Dat[,ind], CI[1]) + MAX <- stats::quantile(temp_Dat[,ind], CI[2]) + } + if(MIN < 0) MIN <- 0 + DB <- rbind(DB, list(id = id, date = time_point, var_name = var_name, type = type, upper = MAX, lower = MIN, mean = MEAN)) + } + } + } + } + #if we plot by each site. 
+ if(by == "site") { + p <- list() + for (site.id in sort(unique(site_ids))) { + site_p <- list() + for (var.name in sort(unique(var_names))) { + site_p <- rlist::list.append(site_p, dplyr::filter(DB, id == site.id & var_name == var.name) %>% + dplyr::select(-c(id, var_name)) %>% + dplyr::mutate(date = lubridate::ymd(date)) %>% + ggplot2::ggplot(ggplot2::aes(x=date)) + + ggplot2::geom_ribbon(ggplot2::aes(x = .data$date, ymin = .data$lower, ymax = .data$upper, fill=.data$type), inherit.aes = FALSE, alpha = 0.5) + + ggplot2::geom_line(ggplot2::aes(y=mean, color=type),lwd=0.5,linetype=2) + + ggplot2::geom_point(ggplot2::aes(y=mean, color=type), size=1.5, alpha=0.75) + + ggplot2::scale_fill_manual(values = style$fill_color) + + ggplot2::scale_color_manual(values = style$general_color) + + ggplot2::ylab(paste0(var.name, " (", unit[var.name], ")"))) + } + p <- rlist::list.append(p, ggpubr::annotate_figure(ggpubr::ggarrange(plotlist = site_p, common.legend = TRUE), + top = ggpubr::text_grob(site.id, color = style$title_color, face = "bold", size = 14))) + } + #if we plot by each state variable + } else if (by == "var") { + p <- list() + for (var.name in sort(unique(var_names))) { + var_p <- list() + for (site.id in sort(unique(site_ids))) { + var_p <- rlist::list.append(var_p, dplyr::filter(DB, id == site.id & var_name == var.name) %>% + dplyr::select(-c(id, var_name)) %>% + dplyr::mutate(date = lubridate::ymd(date)) %>% + ggplot2::ggplot(ggplot2::aes(x=date)) + + ggplot2::geom_ribbon(ggplot2::aes(x = .data$date, ymin = .data$lower, ymax = .data$upper, fill=.data$type), inherit.aes = FALSE, alpha = 0.5) + + ggplot2::geom_line(ggplot2::aes(y=mean, color=type),lwd=0.5,linetype=2) + + ggplot2::geom_point(ggplot2::aes(y=mean, color=type), size=1.5, alpha=0.75) + + ggplot2::scale_fill_manual(values = style$fill_color) + + ggplot2::scale_color_manual(values = style$general_color) + + ggplot2::ylab(paste0(var.name, " (", unit[var.name], ")")) + + ggplot2::ggtitle(site.id)) + } + p <- rlist::list.append(p, ggpubr::annotate_figure(ggpubr::ggarrange(plotlist = var_p, common.legend = TRUE), + top = ggpubr::text_grob(var.name, color = style$title_color, face = "bold", size = 14))) + } + #if we plot by each (pft * state variable) + } else if (by == "pft") { + if (!exists("pft")) { + PEcAn.logger::logger.info("Please provide the pdf path!") + return(0) + } else { + p <- list() + for (PFT in sort(unique(pft$pft))) { + site_id_pft <- pft$site[which(pft$pft == PFT)] + var_p <- list() + for (var.name in sort(unique(var_names))) { + site_p <- list() + for (site.id in sort(site_id_pft)) { + site_p <- rlist::list.append(site_p, dplyr::filter(DB, id == site.id & var_name == var.name) %>% + dplyr::select(-c(id, var_name)) %>% + dplyr::mutate(date = lubridate::ymd(date)) %>% + ggplot2::ggplot(ggplot2::aes(x=date)) + + ggplot2::geom_ribbon(ggplot2::aes(x = .data$date, ymin = .data$lower, ymax = .data$upper, fill=.data$type), inherit.aes = FALSE, alpha = 0.5) + + ggplot2::geom_line(ggplot2::aes(y=mean, color=type),lwd=0.5,linetype=2) + + ggplot2::geom_point(ggplot2::aes(y=mean, color=type), size=1.5, alpha=0.75) + + ggplot2::scale_fill_manual(values = style$fill_color) + + ggplot2::scale_color_manual(values = style$general_color) + + ggplot2::ylab(paste0(var.name, " (", unit[var.name], ")")) + + ggplot2::ggtitle(site.id)) + } + var_p <- rlist::list.append(var_p, ggpubr::annotate_figure(ggpubr::ggarrange(plotlist = site_p, common.legend = TRUE), + top = ggpubr::text_grob(paste(PFT, var.name), color = style$title_color, 
face = "bold", size = 14))) + } + p <- rlist::list.append(p, var_p) + } + } + } + #print pdf + grDevices::pdf(file.path(outdir, paste0("SDA_", by, ".pdf")),width = PDF_w, height = PDF_h) + print(p) + grDevices::dev.off() +} \ No newline at end of file diff --git a/modules/assim.sequential/R/version.R b/modules/assim.sequential/R/version.R new file mode 100644 index 00000000000..0e58d885272 --- /dev/null +++ b/modules/assim.sequential/R/version.R @@ -0,0 +1,3 @@ +# Set at package install time, used by pecan.all::pecan_version() +# to identify development versions of packages +.build_hash <- Sys.getenv("PECAN_GIT_REV", "unknown") diff --git a/modules/assim.sequential/inst/MultiSite-Exs/SDA/Create_Multi_settings.R b/modules/assim.sequential/inst/MultiSite-Exs/SDA/Create_Multi_settings.R index 61d8253e1e0..0202d56cede 100644 --- a/modules/assim.sequential/inst/MultiSite-Exs/SDA/Create_Multi_settings.R +++ b/modules/assim.sequential/inst/MultiSite-Exs/SDA/Create_Multi_settings.R @@ -9,14 +9,15 @@ start_date <- "2012/01/01" end_date <- "2021/12/31" #setup working space -outdir <- "/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42/SDA" -SDA_run_dir <- "/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42/SDA/run" -SDA_out_dir <- "/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42/SDA/out" +outdir <- "/projectnb/dietzelab/dongchen/anchorSites/SDA/" +SDA_run_dir <- "/projectnb/dietzelab/dongchen/anchorSites/SDA/run/" +SDA_out_dir <- "/projectnb/dietzelab/dongchen/anchorSites/SDA/out/" -ERA5_dir <- "/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42/ERA5_2012_2021/" -XML_out_dir <- "/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42/SDA/pecan.xml" +ERA5_dir <- "/projectnb/dietzelab/dongchen/anchorSites/ERA5_2012_2021/" +XML_out_dir <- "/projectnb/dietzelab/dongchen/anchorSites/SDA/pecan.xml" -pft_csv_dir <- "/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42/site_pft.csv" +pft_csv_dir <- "/projectnb/dietzelab/dongchen/anchorSites/site_pft.csv" +modis_phenology_dir <- "/projectnb/dietzelab/Cherry/pft_files/leaf_phenology.csv" #Obs_prep part #AGB @@ -44,9 +45,51 @@ SoilC_export_csv <- TRUE #Obs Date obs_start_date <- "2012-07-15" obs_end_date <- "2021-07-15" -obs_outdir <- "/projectnb/dietzelab/dongchen/All_NEON_SDA/test_OBS" +obs_outdir <- "/projectnb/dietzelab/dongchen/anchorSites/Obs" timestep <- list(unit="year", num=1) +#specify model binary +model_binary <- "/usr2/postdoc/istfer/SIPNET/trunk//sipnet_if" + +#specify host section +host.flag <- "local" +if (host.flag == "remote") { + #if we submit jobs through tunnel remotely. 
+ host = structure(list( + name = "geo.bu.edu", + usr = "zhangdc", + folder = SDA_out_dir, + prerun = "module load R/4.1.2", + cdosetup = "module load cdo/2.0.6", + qsub = "qsub -l h_rt=24:00:00 -q 'geo*' -N @NAME@ -o @STDOUT@ -e @STDERR@ -S /bin/bash", + qsub.jobid = "Your job ([0-9]+) .*", + qstat = "qstat -j @JOBID@ || echo DONE", + tunnel = "~/Tunnel/Tunnel", + outdir = SDA_out_dir, + rundir = SDA_run_dir + )) +} else if (host.flag == "local") { + host = structure(list( + name = "localhost", + folder = SDA_out_dir, + outdir = SDA_out_dir, + rundir = SDA_run_dir + )) +} else if (host.flag == "rabbitmq") { + host = structure(list( + name = "localhost", + rabbitmq = structure(list( + uri = "amqp://guest:guest@pecan-rabbitmq:15672/%2F", + queue = "SIPNET_r136", + cp2cmd = "oc rsync @RUNDIR@ $(oc get pod -l app.kubernetes.io/name=pecan-model-sipnet-136 -o name):@RUNDIR@", + cpfcmd = "/data/bin/oc rsync @OUTDIR@ $(/data/bin/oc get pod -l app=dongchen-sda -o name):@OUTDIR@" + )), + folder = SDA_out_dir, + outdir = SDA_out_dir, + rundir = SDA_run_dir + )) + model_binary <- "/usr/local/bin/sipnet.r136" +} #Start building template template <- PEcAn.settings::Settings(list( ############################################################################ @@ -58,14 +101,19 @@ template <- PEcAn.settings::Settings(list( ############################################################################ state.data.assimilation = structure(list( process.variance = TRUE, + aqq.Init = 1, + bqq.Init = 1, adjustment = TRUE, censored.data = FALSE, + free.run = FALSE, FullYearNC = TRUE, NC.Overwrite = FALSE, NC.Prefix = "sipnet.out", - q.type = "SINGLE", + q.type = "vector", + by.site = FALSE, Localization.FUN = "Local.support", scalef = 1, + chains = 1, data = structure(list(format_id = 1000000040, input.id = 1000013298)), state.variables = structure(list( #you could add more state variables here @@ -122,7 +170,7 @@ template <- PEcAn.settings::Settings(list( ########################################################################### database = structure(list( bety = structure( - list(user = "bety", password = "bety", host = "128.197.168.114", + list(user = "bety", password = "bety", host = "10.241.76.27", dbname = "bety", driver = "PostgreSQL", write = "FALSE" )) )), @@ -192,7 +240,7 @@ template <- PEcAn.settings::Settings(list( type = "SIPNET", revision = "ssr", delete.raw = FALSE, - binary = "/usr2/postdoc/istfer/SIPNET/trunk//sipnet_if", + binary = model_binary, jobtemplate = "~/sipnet_geo.job" )), @@ -204,19 +252,7 @@ template <- PEcAn.settings::Settings(list( ########################################################################### ########################################################################### #be carefull of the host section, you need to specify the host of your own!!! 
- host = structure(list( - name = "geo.bu.edu", - usr = "zhangdc", - folder = "/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42/SDA/out", - prerun = "module load R/4.1.2", - cdosetup = "module load cdo/2.0.6", - qsub = "qsub -l h_rt=24:00:00 -q 'geo*' -N @NAME@ -o @STDOUT@ -e @STDERR@ -S /bin/bash", - qsub.jobid = "Your job ([0-9]+) .*", - qstat = "qstat -j @JOBID@ || echo DONE", - tunnel = "~/Tunnel/Tunnel", - outdir = "/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42/SDA/out", - rundir = "/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42/SDA/run" - )), + host = host, ############################################################################ ############################################################################ @@ -255,7 +291,8 @@ template <- PEcAn.settings::Settings(list( # )), # soilinitcond = structure(list(path = "/projectnb/dietzelab/ahelgeso/EFI_Forecast_Challenge/" # )), - pft.site = structure(list(path = "/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42/site_pft.csv")) + pft.site = structure(list(path = pft_csv_dir)), + leaf_phenology = structure(list(path = modis_phenology_dir)) )) )) )) @@ -268,8 +305,8 @@ template <- PEcAn.settings::Settings(list( ############################################################################ ############################################################################ -sitegroupId <- 1000000031 -nSite <- 39 +sitegroupId <- 1000000033 +nSite <- 330 multiRunSettings <- PEcAn.settings::createSitegroupMultiSettings( template, @@ -278,6 +315,9 @@ multiRunSettings <- PEcAn.settings::createSitegroupMultiSettings( if(file.exists(XML_out_dir)){ unlink(XML_out_dir) } + + + PEcAn.settings::write.settings(multiRunSettings, outputfile = "pecan.xml") #here we re-read the xml file to fix issues of some special character within the Host section. @@ -287,29 +327,6 @@ writeChar(tmp, XML_out_dir) settings <- PEcAn.settings::read.settings(XML_out_dir) -#iteratively grab ERA5 paths for each site -for (i in 1:nSite) { - temp_ERA5_path <- settings[[i]]$run$inputs$met$path - temp_site_id <- settings[[i]]$run$site$id - temp_full_paths <- list.files(path=paste0(temp_ERA5_path, temp_site_id), pattern = '*.clim', full.names = T) - - #need a better way to code it up - #test works!!!! 
- #populated IC file paths into settings - Create_mult_list <- function(list.names, paths){ - out <- as.list(paths) - names(out) <- list.names - out - } - settings[[i]]$run$inputs$met$path <- Create_mult_list(rep("path", length(temp_full_paths)), temp_full_paths) - - #code on met_start and met_end - settings[[i]]$run$site$met.start <- start_date - settings[[i]]$run$site$met.end <- end_date - settings[[i]]$run$start.date <- start_date - settings[[i]]$run$end.date <- end_date -} - #add Lat and Lon to each site #grab Site IDs from settings site_ID <- c() @@ -333,6 +350,26 @@ for (i in 1:nSite) { settings[[i]]$run$site$name <- site_info$sitename[index_site_info]#temp_ID } +#remove overlapped sites +site.locs <- settings$run %>% + purrr::map('site') %>% + purrr::map_dfr(~c(.x[['lon']],.x[['lat']]) %>% as.numeric)%>% + t %>% + `colnames<-`(c("lon","lat")) %>% data.frame +del.ind <- c() +for (i in 1:nrow(site.locs)) { + for (j in i:nrow(site.locs)) { + if (i == j) { + next + } + if (site.locs$lon[i] == site.locs$lon[j] && + site.locs$lat[i] == site.locs$lat[j]) { + del.ind <- c(del.ind, j) + } + } +} +settings <- settings[-del.ind] + ##### unlink(paste0(settings$outdir,"/pecan.xml")) PEcAn.settings::write.settings(settings, outputfile = "pecan.xml") diff --git a/modules/assim.sequential/inst/NEFI/README.html b/modules/assim.sequential/inst/NEFI/README.html index 6c67d52b5fe..db88d350e13 100644 --- a/modules/assim.sequential/inst/NEFI/README.html +++ b/modules/assim.sequential/inst/NEFI/README.html @@ -38,7 +38,7 @@ }; - @@ -488,7 +473,7 @@ function checkDate(date, field) { function printInfo($siteinfo, $var, $text) { if (isset($siteinfo[$var])) { $tmp = preg_replace('/\s\s+/', ' ', toXML($siteinfo[$var])); - echo " info+= \"${text} : ${tmp}
\";"; + echo " info+= \"{$text} : {$tmp}
\";"; } } ?> diff --git a/web/03a-ameriflux.php b/web/03a-ameriflux.php index 195be3a7f70..fc0916d5f44 100644 --- a/web/03a-ameriflux.php +++ b/web/03a-ameriflux.php @@ -77,14 +77,14 @@ function nextStep() { foreach($_REQUEST as $key => $value) { if (is_array($value)) { foreach($value as $v) { - echo ""; + echo ""; } } else { if(strcmp($key, "notes") == 0 ) { $str = htmlentities($value, ENT_QUOTES); - echo ""; + echo ""; } else { - echo ""; + echo ""; } } } @@ -101,14 +101,14 @@ function nextStep() { foreach($_REQUEST as $key => $value) { if (is_array($value)) { foreach($value as $v) { - echo ""; + echo ""; } } else { if (strcmp($key, "notes") == 0 ) { $str = htmlentities($value, ENT_QUOTES); - echo ""; + echo ""; } else if (strcmp($key, "fluxusername") != 0 ) { - echo ""; + echo ""; } } } diff --git a/web/03a-fluxnet.php b/web/03a-fluxnet.php index f7a9a5527bd..cae0228f40a 100644 --- a/web/03a-fluxnet.php +++ b/web/03a-fluxnet.php @@ -69,14 +69,14 @@ function nextStep() { foreach($_REQUEST as $key => $value) { if (is_array($value)) { foreach($value as $v) { - echo ""; + echo ""; } } else { if(strcmp($key, "notes") == 0 ) { $str = htmlentities($value, ENT_QUOTES); - echo ""; + echo ""; } else { - echo ""; + echo ""; } } } @@ -93,14 +93,14 @@ function nextStep() { foreach($_REQUEST as $key => $value) { if (is_array($value)) { foreach($value as $v) { - echo ""; + echo ""; } } else { if(strcmp($key, "notes") == 0 ) { $str = htmlentities($value, ENT_QUOTES); - echo ""; + echo ""; } else { - echo ""; + echo ""; } } } diff --git a/web/03a-narr.php b/web/03a-narr.php index f7c16747538..fae7d16c836 100644 --- a/web/03a-narr.php +++ b/web/03a-narr.php @@ -69,14 +69,14 @@ function nextStep() { foreach($_REQUEST as $key => $value) { if (is_array($value)) { foreach($value as $v) { - echo ""; + echo ""; } } else { if(strcmp($key, "notes") == 0) { $str = htmlentities($value, ENT_QUOTES); - echo ""; + echo ""; } else { - echo ""; + echo ""; } } } @@ -92,14 +92,14 @@ function nextStep() { foreach($_REQUEST as $key => $value) { if (is_array($value)) { foreach($value as $v) { - echo ""; + echo ""; } } else { if(strcmp($key, "notes") == 0) { $str = htmlentities($value, ENT_QUOTES); - echo ""; + echo ""; } else { - echo ""; + echo ""; } } } diff --git a/web/04-remote.php b/web/04-remote.php index c94fcac31fd..c9ac34a62ec 100644 --- a/web/04-remote.php +++ b/web/04-remote.php @@ -152,10 +152,10 @@ function nextStep() { foreach($_REQUEST as $key => $value) { if (is_array($value)) { foreach($value as $v) { - echo ""; + echo ""; } } else { - echo ""; + echo ""; } } ?> @@ -166,10 +166,10 @@ function nextStep() { foreach($_REQUEST as $key => $value) { if (is_array($value)) { foreach($value as $v) { - echo ""; + echo ""; } } else { - echo ""; + echo ""; } } ?> diff --git a/web/04-runpecan.php b/web/04-runpecan.php index 5f2f7609201..039ac52654e 100644 --- a/web/04-runpecan.php +++ b/web/04-runpecan.php @@ -42,7 +42,6 @@ $offline=isset($_REQUEST['offline']); $pecan_edit=isset($_REQUEST['pecan_edit']); $model_edit=isset($_REQUEST['model_edit']); -$browndog=isset($_REQUEST['browndog']); $qsub=isset($_REQUEST['qsub']); # parameters @@ -61,7 +60,7 @@ } $hostname=$_REQUEST['hostname']; if (!array_key_exists($hostname, $hostlist)) { - die("${hostname} is not an approved host"); + die("{$hostname} is not an approved host"); } $hostoptions = $hostlist[$hostname]; @@ -134,19 +133,19 @@ foreach($_REQUEST as $k => $v) { if (is_array($v)) { foreach($v as $x) { - $params .= "&${k}[]=$x"; + $params .= "&{$k}[]=$x"; } } else { 
if(strcmp($k, "notes") == 0) { $str = htmlentities($v, ENT_QUOTES); - $params .= "&${k}=$str"; + $params .= "&{$k}=$str"; } else { - $params .= "&${k}=$v"; + $params .= "&{$k}=$v"; } } } - $params .= "&msg=WARNING : Selected dates are not within the bounds of the weather data file you selected. START: ${startdate} ${metstart2} END: ${enddate} ${metend2}"; - header("Location: checkfailed.php?${params}"); + $params .= "&msg=WARNING : Selected dates are not within the bounds of the weather data file you selected. START: {$startdate} {$metstart2} END: {$enddate} {$metend2}"; + header("Location: checkfailed.php?{$params}"); exit(); } @@ -189,7 +188,7 @@ # folders $folder = $output_folder . DIRECTORY_SEPARATOR . 'PEcAn_' . $workflowid; -if ($pdo->query("UPDATE workflows SET folder='${folder}' WHERE id=${workflowid}") === FALSE) { +if ($pdo->query("UPDATE workflows SET folder='{$folder}' WHERE id={$workflowid}") === FALSE) { die('Can\'t update workflow : ' . (error_database())); } @@ -216,22 +215,22 @@ # create the folder(s) if (!mkdir($folder)) { - die("Can't create output folder [${folder}]"); + die("Can't create output folder [{$folder}]"); } if (!is_dir($dbfiles_folder) && !mkdir($dbfiles_folder, 0777, true)) { - die("Can't create output folder [${dbfiles_folder}]"); + die("Can't create output folder [{$dbfiles_folder}]"); } if ($hostname != $fqdn) { $tunnel_folder = $folder . DIRECTORY_SEPARATOR . "tunnel"; if (!mkdir($tunnel_folder)) { - die("Can't create output folder [${tunnel_folder}]"); + die("Can't create output folder [{$tunnel_folder}]"); } ## data tunnel if(isset($hostoptions['data_hostname'])){ $data_tunnel_folder = $tunnel_folder . DIRECTORY_SEPARATOR . "data"; if (!mkdir($data_tunnel_folder)) { - die("Can't create output folder [${data_tunnel_folder}]"); + die("Can't create output folder [{$data_tunnel_folder}]"); } } } @@ -245,34 +244,34 @@ fwrite($fh, " " . toXML($notes_xml) . "" . PHP_EOL); fwrite($fh, " " . get_userid() . "" . PHP_EOL); fwrite($fh, " " . get_user_name() . "" . PHP_EOL); -fwrite($fh, " ${runtime}" . PHP_EOL); +fwrite($fh, " {$runtime}" . PHP_EOL); fwrite($fh, " " . PHP_EOL); -fwrite($fh, " ${folder}" . PHP_EOL); +fwrite($fh, " {$folder}" . PHP_EOL); fwrite($fh, " " . PHP_EOL); fwrite($fh, " " . PHP_EOL); -fwrite($fh, " ${db_bety_username}" . PHP_EOL); -fwrite($fh, " ${db_bety_password}" . PHP_EOL); -fwrite($fh, " ${db_bety_hostname}" . PHP_EOL); +fwrite($fh, " {$db_bety_username}" . PHP_EOL); +fwrite($fh, " {$db_bety_password}" . PHP_EOL); +fwrite($fh, " {$db_bety_hostname}" . PHP_EOL); if (isset($db_bety_port)) { - fwrite($fh, " ${db_bety_port}" . PHP_EOL); + fwrite($fh, " {$db_bety_port}" . PHP_EOL); } -fwrite($fh, " ${db_bety_database}" . PHP_EOL); +fwrite($fh, " {$db_bety_database}" . PHP_EOL); fwrite($fh, " PostgreSQL" . PHP_EOL); fwrite($fh, " true" . PHP_EOL); fwrite($fh, " " . PHP_EOL); if (isset($db_fia_database) && ($db_fia_database != "")) { fwrite($fh, " " . PHP_EOL); - fwrite($fh, " ${db_fia_username}" . PHP_EOL); - fwrite($fh, " ${db_fia_password}" . PHP_EOL); - fwrite($fh, " ${db_fia_hostname}" . PHP_EOL); + fwrite($fh, " {$db_fia_username}" . PHP_EOL); + fwrite($fh, " {$db_fia_password}" . PHP_EOL); + fwrite($fh, " {$db_fia_hostname}" . PHP_EOL); if (isset($db_fia_port)) { - fwrite($fh, " ${db_fia_port}" . PHP_EOL); + fwrite($fh, " {$db_fia_port}" . PHP_EOL); } - fwrite($fh, " ${db_fia_database}" . PHP_EOL); + fwrite($fh, " {$db_fia_database}" . PHP_EOL); if ($db_fia_type == "mysql") { fwrite($fh, " MySQL" . 
PHP_EOL); } else if ($db_fia_type = "pgsql") { @@ -281,21 +280,14 @@ fwrite($fh, " " . PHP_EOL); } -fwrite($fh, " ${dbfiles_folder}" . PHP_EOL); +fwrite($fh, " {$dbfiles_folder}" . PHP_EOL); fwrite($fh, " " . PHP_EOL); -if ($browndog) { - fwrite($fh, " " . PHP_EOL); - fwrite($fh, " ${browndog_url}" . PHP_EOL); - fwrite($fh, " ${browndog_username}" . PHP_EOL); - fwrite($fh, " ${browndog_password}" . PHP_EOL); - fwrite($fh, " " . PHP_EOL); -} fwrite($fh, " " . PHP_EOL); foreach($pft as $p) { fwrite($fh, " " . PHP_EOL); - fwrite($fh, " ${p} " . PHP_EOL); + fwrite($fh, " {$p} " . PHP_EOL); fwrite($fh, " " . PHP_EOL); } fwrite($fh, " " . PHP_EOL); @@ -310,11 +302,11 @@ if (!empty($runs)){ fwrite($fh, " " . PHP_EOL); - fwrite($fh, " ${runs}" . PHP_EOL); - fwrite($fh, " ${variables}" . PHP_EOL); + fwrite($fh, " {$runs}" . PHP_EOL); + fwrite($fh, " {$variables}" . PHP_EOL); fwrite($fh, " " . PHP_EOL); fwrite($fh, " " . PHP_EOL); - fwrite($fh, " ${parm_method}" . PHP_EOL); + fwrite($fh, " {$parm_method}" . PHP_EOL); fwrite($fh, " " . PHP_EOL); fwrite($fh, " " . PHP_EOL); fwrite($fh, " sampling" . PHP_EOL); @@ -340,17 +332,17 @@ fwrite($fh, " " . PHP_EOL); fwrite($fh, " " . PHP_EOL); foreach($sensitivity as $s) { - fwrite($fh, " ${s}" . PHP_EOL); + fwrite($fh, " {$s}" . PHP_EOL); } fwrite($fh, " " . PHP_EOL); - fwrite($fh, " ${variables}" . PHP_EOL); + fwrite($fh, " {$variables}" . PHP_EOL); fwrite($fh, " " . PHP_EOL); } fwrite($fh, " " . PHP_EOL); -fwrite($fh, " ${modelid}" . PHP_EOL); +fwrite($fh, " {$modelid}" . PHP_EOL); if ($modeltype == "ED2") { - fwrite($fh, " ED2IN.r${revision}" . PHP_EOL); + fwrite($fh, " ED2IN.r{$revision}" . PHP_EOL); fwrite($fh, " " . PHP_EOL); fwrite($fh, " " . PHP_EOL); fwrite($fh, " 0.01" . PHP_EOL); @@ -362,9 +354,9 @@ fwrite($fh, " 0" . PHP_EOL); } if (isset($hostoptions['models'])) { - $model_version="${modeltype}"; - if (isset($hostoptions['models']["${modeltype} (r${revision})"])) { - $model_version="${modeltype} (r${revision})"; + $model_version="{$modeltype}"; + if (isset($hostoptions['models']["{$modeltype} (r{$revision})"])) { + $model_version="{$modeltype} (r{$revision})"; } if (isset($hostoptions['models'][$model_version])) { if (is_array($hostoptions['models'][$model_version])) { @@ -385,42 +377,44 @@ fwrite($fh, " " . PHP_EOL); fwrite($fh, " " . PHP_EOL); fwrite($fh, " " . PHP_EOL); -fwrite($fh, " ${siteid}" . PHP_EOL); -fwrite($fh, " ${metstart}" . PHP_EOL); -fwrite($fh, " ${metend}" . PHP_EOL); +fwrite($fh, " {$siteid}" . PHP_EOL); +fwrite($fh, " {$metstart}" . PHP_EOL); +fwrite($fh, " {$metend}" . PHP_EOL); fwrite($fh, " " . PHP_EOL); fwrite($fh, " " . PHP_EOL); foreach($_REQUEST as $key => $val) { if (substr($key, 0, 6) != "input_") continue; if ($val == -1) continue; $tag=substr($key, 6); - fwrite($fh, " <${tag}>" . PHP_EOL); + fwrite($fh, " <{$tag}>" . PHP_EOL); if (is_numeric($val)) { - fwrite($fh, " ${val}" . PHP_EOL); + fwrite($fh, " {$val}" . PHP_EOL); } else { $parts=explode(".", $val, 3); - fwrite($fh, " ${parts[0]}" . PHP_EOL); - fwrite($fh, " ${parts[1]}" . PHP_EOL); - fwrite($fh, " ${parts[2]}" . PHP_EOL); + fwrite($fh, " {$parts[0]}" . PHP_EOL); + fwrite($fh, " {$parts[1]}" . PHP_EOL); + if (count($parts) > 2) { + fwrite($fh, " {$parts[2]}" . PHP_EOL); + } if (isset($_REQUEST['fluxusername'])) { - fwrite($fh, " ${_REQUEST['fluxusername']}" . PHP_EOL); + fwrite($fh, " {$_REQUEST['fluxusername']}" . PHP_EOL); } } - fwrite($fh, " " . PHP_EOL); + fwrite($fh, " " . PHP_EOL); } fwrite($fh, " " . PHP_EOL); -fwrite($fh, " ${startdate}" . 
PHP_EOL); -fwrite($fh, " ${enddate}" . PHP_EOL); +fwrite($fh, " {$startdate}" . PHP_EOL); +fwrite($fh, " {$enddate}" . PHP_EOL); fwrite($fh, " " . PHP_EOL); fwrite($fh, " " . PHP_EOL); if ($hostname == $fqdn) { fwrite($fh, " localhost" . PHP_EOL); } else { - fwrite($fh, " ${hostname}" . PHP_EOL); + fwrite($fh, " {$hostname}" . PHP_EOL); } if ($tunnel_username != "") { - fwrite($fh, " ${tunnel_username}" . PHP_EOL); + fwrite($fh, " {$tunnel_username}" . PHP_EOL); } if (isset($hostoptions['folder'])) { $remote = $hostoptions['folder']; @@ -476,20 +470,20 @@ $url .= $_SERVER['HTTP_HOST'] . ':' . $_SERVER['SERVER_PORT']; $url .= str_replace("04-runpecan.php", "08-finished.php", $_SERVER["SCRIPT_NAME"]); if ($offline) { - $url .= "?workflowid=${workflowid}&offline=offline"; + $url .= "?workflowid={$workflowid}&offline=offline"; } else { - $url .= "?workflowid=${workflowid}"; + $url .= "?workflowid={$workflowid}"; } fwrite($fh, " " . PHP_EOL); - fwrite($fh, " ${email}" . PHP_EOL); - fwrite($fh, " ${url}" . PHP_EOL); + fwrite($fh, " {$email}" . PHP_EOL); + fwrite($fh, " {$url}" . PHP_EOL); fwrite($fh, " " . PHP_EOL); } fwrite($fh, "
" . PHP_EOL); fclose($fh); # copy workflow -copy("workflow.R", "${folder}/workflow.R"); +copy("workflow.R", "{$folder}/workflow.R"); # create the tunnel if ($hostname != $fqdn) { @@ -499,7 +493,7 @@ fclose($fh); # start tunnel - pclose(popen("${SSHtunnel} ${hostname} ${tunnel_username} ${tunnel_folder} > ${tunnel_folder}/log &", 'r')); + pclose(popen("{$SSHtunnel} {$hostname} {$tunnel_username} {$tunnel_folder} > {$tunnel_folder}/log &", 'r')); ## data tunnel if(isset($hostoptions['data_hostname'])){ @@ -509,21 +503,21 @@ fclose($fh); # start tunnel - pclose(popen("${SSHtunnel} ${hostoptions['data_hostname']} ${tunnel_username} ${data_tunnel_folder} > ${data_tunnel_folder}/log &", 'r')); + pclose(popen("{$SSHtunnel} {$hostoptions['data_hostname']} {$tunnel_username} {$data_tunnel_folder} > {$data_tunnel_folder}/log &", 'r')); } } # redirect to the right location if ($pecan_edit) { - $path = "06-edit.php?workflowid=$workflowid&pecan_edit=pecan_edit&hostname=${hostname}"; + $path = "06-edit.php?workflowid=$workflowid&pecan_edit=pecan_edit&hostname={$hostname}"; if ($model_edit) { $path .= "&model_edit=model_edit"; } if ($offline) { $path .= "&offline=offline"; } - header("Location: ${path}"); + header("Location: {$path}"); } else if (isset($hostoptions['rabbitmq_uri'])) { $rabbitmq_uri = $hostoptions['rabbitmq_uri']; if (isset($hostoptions['rabbitmq_queue'])) { @@ -541,7 +535,7 @@ send_rabbitmq_message($message, $rabbitmq_uri, $rabbitmq_queue); #done - $path = "05-running.php?workflowid=$workflowid&hostname=${hostname}"; + $path = "05-running.php?workflowid=$workflowid&hostname={$hostname}"; if ($pecan_edit) { $path .= "&pecan_edit=pecan_edit"; } @@ -551,7 +545,7 @@ if ($offline) { $path .= "&offline=offline"; } - header("Location: ${path}"); + header("Location: {$path}"); } else { # start the actual workflow chdir($folder); @@ -563,7 +557,7 @@ } #done - $path = "05-running.php?workflowid=$workflowid&hostname=${hostname}"; + $path = "05-running.php?workflowid=$workflowid&hostname={$hostname}"; if ($pecan_edit) { $path .= "&pecan_edit=pecan_edit"; } @@ -573,7 +567,7 @@ if ($offline) { $path .= "&offline=offline"; } - header("Location: ${path}"); + header("Location: {$path}"); } close_database(); diff --git a/web/05-running.php b/web/05-running.php index d706b4d6758..5d347bfe672 100644 --- a/web/05-running.php +++ b/web/05-running.php @@ -34,7 +34,7 @@ } $hostname=$_REQUEST['hostname']; if (!array_key_exists($hostname, $hostlist)) { - die("${hostname} is not an approved host"); + die("{$hostname} is not an approved host"); } $hostoptions = $hostlist[$hostname]; @@ -79,7 +79,7 @@ if (isset($params['email']) && ($params['email'] != "")) { $url = (isset($_SERVER['HTTPS']) ? "https://" : "http://"); $url .= $_SERVER['HTTP_HOST'] . ':' . $_SERVER['SERVER_PORT'] . 
$_SERVER["SCRIPT_NAME"]; - $url .= "?workflowid=${workflowid}&loglines=${loglines}&hostname=${hostname}"; + $url .= "?workflowid={$workflowid}&loglines={$loglines}&hostname={$hostname}"; if ($offline) { $url .= "&offline=offline"; } @@ -93,7 +93,7 @@ $error = true; } if ($data[0] == "ADVANCED" && count($data) < 3) { - header( "Location: 06-edit.php?workflowid=${workflowid}&hostname=${hostname}${offline}"); + header( "Location: 06-edit.php?workflowid={$workflowid}&hostname={$hostname}{$offline}"); close_database(); exit; } @@ -184,24 +184,19 @@ function refresh() { foreach ($status as $line) { $data = explode("\t", $line); echo " \n"; - if ($data[0] == "BrownDog") { - echo " "; - echo "${data[0]} \"BrownDog\"\n"; - } else { - echo " ${data[0]}\n"; - } + echo " {$data[0]}\n"; if (count($data) >= 2) { - echo " ${data[1]}\n"; + echo " {$data[1]}\n"; } else { echo " \n"; } if (count($data) >= 3) { - echo " ${data[2]}\n"; + echo " {$data[2]}\n"; } else { echo " \n"; } if (count($data) >= 4) { - echo " ${data[3]}\n"; + echo " {$data[3]}\n"; } else { $line = "RUNNING"; if ($data[0] == "MODEL") { @@ -211,7 +206,7 @@ function refresh() { } } } - echo " ${line}\n"; + echo " {$line}\n"; } echo " \n"; } @@ -230,9 +225,9 @@ function refresh() { $lines=array(10, 20, 50, 100); foreach($lines as &$v) { if ($v == $loglines) { - echo ""; + echo ""; } else { - echo ""; + echo ""; } } ?> diff --git a/web/06-edit.php b/web/06-edit.php index 5016d9a7c61..e8f63884559 100644 --- a/web/06-edit.php +++ b/web/06-edit.php @@ -41,7 +41,7 @@ } $hostname=$_REQUEST['hostname']; if (!array_key_exists($hostname, $hostlist)) { - die("${hostname} is not an approved host"); + die("{$hostname} is not an approved host"); } $hostoptions = $hostlist[$hostname]; diff --git a/web/07-analysis.php b/web/07-analysis.php index 6b254c0d6dd..53247a49200 100644 --- a/web/07-analysis.php +++ b/web/07-analysis.php @@ -33,7 +33,6 @@ $pecan_edit = (isset($_REQUEST['pecan_edit'])) ? "checked" : ""; $adv_setup = (isset($_REQUEST['adv_setup'])) ? "checked" : ""; $model_edit = (isset($_REQUEST['model_edit'])) ? "checked" : ""; -$browndog = (isset($_REQUEST['browndog'])) ? "checked" : ""; $ensemble_analysis = (isset($_REQUEST['ensemble_analsysis'])) ? "checked" : ""; $sensitivity_analysis = (isset($_REQUEST['sensitivity'])) ? "checked" : ""; @@ -53,7 +52,7 @@ } $hostname=$_REQUEST['hostname']; if (!array_key_exists($hostname, $hostlist)) { - die("${hostname} is not an approved host"); + die("{$hostname} is not an approved host"); } $hostoptions = $hostlist[$hostname]; @@ -187,14 +186,14 @@ function nextStep() { $value){ if(is_array($value)) { foreach($value as $v) { - echo ""; + echo ""; } } else { if(strcmp($key, "notes") == 0) { $str = htmlentities($value, ENT_QUOTES); - echo ""; + echo ""; } else { - echo ""; + echo ""; } } } @@ -211,14 +210,14 @@ function nextStep() { $value){ if(is_array($value)) { foreach($value as $v) { - echo ""; + echo ""; } } else { if(strcmp($key, "notes") == 0) { $str = htmlentities($value, ENT_QUOTES); - echo ""; + echo ""; } else { - echo ""; + echo ""; } } } @@ -228,7 +227,7 @@ function nextStep() {
- + " onChange="validate();"/>
@@ -263,16 +262,6 @@ function nextStep() {
- - diff --git a/web/07-continue.php b/web/07-continue.php index 6e5142c5a44..3b5be86b4a3 100644 --- a/web/07-continue.php +++ b/web/07-continue.php @@ -39,7 +39,7 @@ } $hostname=$_REQUEST['hostname']; if (!array_key_exists($hostname, $hostlist)) { - die("${hostname} is not an approved host"); + die("{$hostname} is not an approved host"); } $hostoptions = $hostlist[$hostname]; @@ -53,7 +53,7 @@ $stmt->closeCursor(); close_database(); -$path = "05-running.php?workflowid=$workflowid&hostname=${hostname}"; +$path = "05-running.php?workflowid=$workflowid&hostname={$hostname}"; if ($pecan_edit) { $path .= "&pecan_edit=pecan_edit"; } diff --git a/web/08-finished.php b/web/08-finished.php index eeb1298c1af..f2ae1b54b7f 100644 --- a/web/08-finished.php +++ b/web/08-finished.php @@ -49,7 +49,7 @@ $params = array(); } if (isset($params['hostname'])) { - $hostname = "&hostname=${params['hostname']}"; + $hostname = "&hostname={$params['hostname']}"; } # check to make sure all is ok @@ -104,7 +104,7 @@ continue; } $pfts[$pft] = array(); - foreach(recursive_scandir("$folder/pft/${pft}", "") as $file) { + foreach(recursive_scandir("$folder/pft/{$pft}", "") as $file) { if (is_dir("$folder/pft/$pft/$file")) { continue; } @@ -145,7 +145,7 @@ $outfile[$runid][] = $file; if (preg_match('/^\d\d\d\d.nc$/', $file)) { $year = substr($file, 0, 4); - $vars = explode("\n", file_get_contents("${folder}/out/${runid}/${file}.var")); + $vars = explode("\n", file_get_contents("{$folder}/out/{$runid}/{$file}.var")); $outplot[$runid][$year] = array_filter($vars); sort($outplot[$runid][$year]); } @@ -253,7 +253,7 @@ foreach($y as $s) { $kv = explode(" ", $s, 2); if ($kv[1] == '') $kv[1] = $kv[0]; - print " outplot['$key']['$x']['{$kv[0]}'] = '${kv[1]}';\n"; + print " outplot['$key']['$x']['{$kv[0]}'] = '{$kv[1]}';\n"; } } } @@ -546,7 +546,7 @@ function startsWith(haystack, needle) {

- Documentation + Documentation
Chat Room
@@ -571,24 +571,19 @@ function startsWith(haystack, needle) { foreach ($status as $line) { $data = explode("\t", $line); echo " \n"; - if ($data[0] == "BrownDog") { - echo " "; - echo "${data[0]} \"BrownDog\"\n"; - } else { - echo " ${data[0]}\n"; - } + echo " {$data[0]}\n"; if (count($data) >= 2) { - echo " ${data[1]}\n"; + echo " {$data[1]}\n"; } else { echo " \n"; } if (count($data) >= 3) { - echo " ${data[2]}\n"; + echo " {$data[2]}\n"; } else { echo " \n"; } if (count($data) >= 4) { - echo " ${data[3]}\n"; + echo " {$data[3]}\n"; } else { echo " RUNNING\n"; } diff --git a/web/checkfailed.php b/web/checkfailed.php index a4f6008635d..322e9547c44 100644 --- a/web/checkfailed.php +++ b/web/checkfailed.php @@ -52,10 +52,10 @@ function nextStep() { $v) { if (is_array($v)) { foreach($v as $x) { - echo "\n"; + echo "\n"; } } else { - echo "\n"; + echo "\n"; } } ?> @@ -64,10 +64,10 @@ function nextStep() { $v) { if (is_array($v)) { foreach($v as $x) { - echo "\n"; + echo "\n"; } } else { - echo "\n"; + echo "\n"; } } ?> diff --git a/web/common.php b/web/common.php index e82330c5abc..0f69d769e07 100644 --- a/web/common.php +++ b/web/common.php @@ -14,7 +14,7 @@ function get_footer() { Terrestrial Ecosystems, Department of Energy (ARPA-E #DE-AR0000594 and #DE-AR0000598), Department of Defense, the Arizona Experiment Station, the Energy Biosciences Institute, and an Amazon AWS in Education Grant. - PEcAn Version 1.7.2"; + PEcAn Version 1.8.0.9000"; } function whoami() { @@ -42,7 +42,7 @@ function left_footer() { ?>

- Documentation + Documentation
Chat Room
@@ -55,10 +55,10 @@ function passvars($ignore) { if (!array_key_exists($key, $ignore)) { if (is_array($value)) { foreach($value as $v) { - echo ""; + echo ""; } } else { - echo ""; + echo ""; } } } @@ -84,16 +84,15 @@ function open_database() { global $pdo; try { - $pdo = new PDO("${db_bety_type}:host=${db_bety_hostname};dbname=${db_bety_database};port=${db_bety_port}", $db_bety_username, $db_bety_password); + $pdo = new PDO("{$db_bety_type}:host={$db_bety_hostname};dbname={$db_bety_database};port={$db_bety_port}", $db_bety_username, $db_bety_password); $pdo->setAttribute(PDO::ATTR_ERRMODE, PDO::ERRMODE_EXCEPTION); } catch (PDOException $e) { // handler to input database configurations manually $host = $_SERVER['HTTP_HOST']; - header("Location: http://$host/setups/edit.php?key=database&message=1",TRUE,307); - //echo "Something wrong :(
Connection failed: " . $e->getMessage(); + echo "Something wrong :(
Connection failed: " . $e->getMessage(); die(); } -// $pdo = new PDO("${db_bety_type}:host=${db_bety_hostname};dbname=${db_bety_database}", $db_bety_username, $db_bety_password); +// $pdo = new PDO("{$db_bety_type}:host={$db_bety_hostname};dbname={$db_bety_database}", $db_bety_username, $db_bety_password); } function close_database() { @@ -208,19 +207,19 @@ function get_page_acccess_level() { function make_rabbitmq_connection($rabbitmq_uri) { $rabbitmq = parse_url($rabbitmq_uri); $connection = new AMQPConnection(); - if ($rabbitmq['host']) { + if (!empty($rabbitmq['host'])) { $connection->setHost($rabbitmq['host']); } - if ($rabbitmq['port']) { + if (!empty($rabbitmq['port'])) { $connection->setPort($rabbitmq['port']); } - if ($rabbitmq['path']) { + if (!empty($rabbitmq['path'])) { $connection->setVhost(urldecode(ltrim($rabbitmq['path'], '/'))); } - if ($rabbitmq['user']) { + if (!empty($rabbitmq['user'])) { $connection->setLogin($rabbitmq['user']); } - if ($rabbitmq['pass']) { + if (!empty($rabbitmq['pass'])) { $connection->setPassword($rabbitmq['pass']); } $connection->connect(); diff --git a/web/config.example.php b/web/config.example.php index 30cda27f783..7243b53cd46 100644 --- a/web/config.example.php +++ b/web/config.example.php @@ -16,10 +16,6 @@ $db_fia_password=""; $db_fia_database=""; -# browdog information -$browndog_url=""; -$browndog_username=""; -$browndog_password=""; # R binary $Rbinary="/usr/bin/R"; diff --git a/web/curl.php b/web/curl.php index 0e4d81d6afe..95f140c1c33 100644 --- a/web/curl.php +++ b/web/curl.php @@ -22,7 +22,7 @@ function getURL() { $url .= "localhost"; } if (array_key_exists('SERVER_PORT', $_SERVER)) { - $url .= ":${_SERVER['SERVER_PORT']}"; + $url .= ":{$_SERVER['SERVER_PORT']}"; } } @@ -71,32 +71,32 @@ function getURL() { } $model = ""; while ($row = @$stmt->fetch(PDO::FETCH_ASSOC)) { - $model = "${row['model_name']} (v${row['revision']})"; + $model = "{$row['model_name']} (v{$row['revision']})"; } $stmt->closeCursor(); echo "
\n";
-echo "# model       : ${model}\n";
-echo "# site        : ${params['sitename']}\n";
-echo "# pft         : ${pft[0]}\n";
-echo "# time range  : ${params['start']} - ${params['end']}\n";
+echo "# model       : {$model}\n";
+echo "# site        : {$params['sitename']}\n";
+echo "# pft         : {$pft[0]}\n";
+echo "# time range  : {$params['start']} - {$params['end']}\n";
 echo "curl -v -X POST \\\n";
-echo "    -F 'hostname=${params['hostname']}' \\\n";
-echo "    -F 'modelid=${params['modelid']}' \\\n";
+echo "    -F 'hostname={$params['hostname']}' \\\n";
+echo "    -F 'modelid={$params['modelid']}' \\\n";
 echo "    -F 'sitegroupid=1' \\\n";
-echo "    -F 'siteid=${params['siteid']}' \\\n";
-echo "    -F 'sitename=${params['sitename']}' \\\n";
-echo "    -F 'pft[]=${pft[0]}' \\\n";
-echo "    -F 'start=${params['start']}' \\\n";
-echo "    -F 'end=${params['end']}' \\\n";
+echo "    -F 'siteid={$params['siteid']}' \\\n";
+echo "    -F 'sitename={$params['sitename']}' \\\n";
+echo "    -F 'pft[]={$pft[0]}' \\\n";
+echo "    -F 'start={$params['start']}' \\\n";
+echo "    -F 'end={$params['end']}' \\\n";
 foreach($params as $key => $value) {
     if (substr($key, 0, 6) === "input_" && $value !== "-1") {
-        echo "    -F '${key}=${value}' \\\n";
+        echo "    -F '{$key}={$value}' \\\n";
     }
 }
-echo "    -F 'email=${params['email']}' \\\n";
-echo "    -F 'notes=${params['notes']}' \\\n";
+echo "    -F 'email={$params['email']}' \\\n";
+echo "    -F 'notes={$params['notes']}' \\\n";
 echo "    '" . getURL() . "'\n";
 echo "
"; diff --git a/web/dataset.php b/web/dataset.php index 81c332e8c56..78ce5b0e9bd 100644 --- a/web/dataset.php +++ b/web/dataset.php @@ -99,12 +99,12 @@ $mime = "image/png"; $file = tempnam(sys_get_temp_dir(),'plot') . ".png"; if (!file_exists($datafile)) { - die("Invalid file name specified ${file}."); + die("Invalid file name specified {$file}."); } # execute command to create graph $escapedargs = escapeshellarg("--args $datafile $year $xvar $yvar $width $height $file"); - shell_exec("R_LIBS_USER='${R_library_path}' PECANSETTINGS='$folder/pecan.xml' ${Rbinary} CMD BATCH --vanilla $escapedargs plot.netcdf.R /tmp/plot.out"); + shell_exec("R_LIBS_USER='{$R_library_path}' PECANSETTINGS='$folder/pecan.xml' {$Rbinary} CMD BATCH --vanilla $escapedargs plot.netcdf.R /tmp/plot.out"); break; default: @@ -112,7 +112,7 @@ } if (!file_exists($file)) { - die("Invalid file name specified ${file}."); + die("Invalid file name specified {$file}."); } if ($mime != "") { header("Content-type: $mime"); diff --git a/web/delete.php b/web/delete.php index fc4ba70da96..81371077cfd 100644 --- a/web/delete.php +++ b/web/delete.php @@ -284,13 +284,13 @@ function nextStep() { echo "

The following files/folders could not be removed.

\n"; echo "
    \n"; foreach($deleted_files['kept'] as $file) { - echo "
  • ${file}
  • \n"; + echo "
  • {$file}
  • \n"; } echo "
\n"; echo "

The following files/folders were removed.

\n"; echo "
    \n"; foreach($deleted_files['removed'] as $file) { - echo "
  • ${file}
  • \n"; + echo "
  • {$file}
  • \n"; } echo "
\n"; } else { diff --git a/web/historylist.php b/web/historylist.php index 3461f26ce14..478c3aba861 100644 --- a/web/historylist.php +++ b/web/historylist.php @@ -48,7 +48,7 @@ } if ($where != "") { - $query .= "WHERE ${where}"; + $query .= "WHERE {$where}"; } $query .= "ORDER BY workflows.id DESC"; @@ -77,7 +77,7 @@ if ($row['value'] != '') { $params = json_decode($row['value'], true); } else { - $params = eval("return ${row['params']};"); + $params = eval("return {$row['params']};"); } if (file_exists($row['folder'] . DIRECTORY_SEPARATOR . "STATUS")) { $statusfile=file($row['folder'] . DIRECTORY_SEPARATOR . "STATUS"); @@ -100,7 +100,7 @@ if (($status == "") && ($row['finished_at'] == "")) { $url = "05-running.php?workflowid=" . $row['id']; if (isset($params['hostname'])) { - $url .= "&hostname=${params['hostname']}"; + $url .= "&hostname={$params['hostname']}"; } $status = "RUNNING"; } diff --git a/web/hostmodelinfo.php b/web/hostmodelinfo.php index 973c243d5f2..2c68bd63da1 100644 --- a/web/hostmodelinfo.php +++ b/web/hostmodelinfo.php @@ -199,7 +199,7 @@ function get_sites() { $subs = array(); $query = "SELECT DISTINCT format_id FROM inputs"; $query .= " INNER JOIN dbfiles ON inputs.id=dbfiles.container_id"; - $where = " WHERE inputs.site_id=${earth} AND dbfiles.container_type='Input'"; + $where = " WHERE inputs.site_id={$earth} AND dbfiles.container_type='Input'"; if ($host) { $query .= " INNER JOIN machines ON dbfiles.machine_id=machines.id"; $where .= " AND machines.hostname=?"; diff --git a/web/insert-site.php b/web/insert-site.php index b5656b78c1a..e14631b085e 100644 --- a/web/insert-site.php +++ b/web/insert-site.php @@ -138,7 +138,7 @@ } } - echo "${result['id']} ${result['sitename']} ${sitegroupid}"; + echo "{$result['id']} {$result['sitename']} {$sitegroupid}"; } ?> diff --git a/web/js/browndog.js b/web/js/browndog.js deleted file mode 100644 index 79958a07e25..00000000000 --- a/web/js/browndog.js +++ /dev/null @@ -1,52 +0,0 @@ -//Brown Dog graph -function browndog_add() { - var node = document.getElementById("browndog_img"); - if (node) return; - - var graphic = $("") - .attr("src", "images/browndog-small-transparent.gif") - .attr("width", "25") - .attr("id", "browndog_img") - .css("position", "absolute") - .css("left", "0px") - .css("bottom", "45px"); - $("body").append(graphic); - - setTimeout(browndog_run, 10); -} - -function browndog_del() { - var node = document.getElementById("browndog_img"); - node.parentNode.removeChild(node); - - node = document.getElementById("browndog_poweredby"); - node.parentNode.removeChild(node); -} - -function browndog_run() { - var graphic = document.getElementById("browndog_img"); - graphic.style.left = parseInt(graphic.style.left) + 25 + "px"; - - if (parseInt(graphic.style.left) < $(window).width() - 50) { - setTimeout(browndog_run, 10); - } else { - //graphic.remove(); - graphic.parentNode.removeChild(graphic); - - //Add powered by graphic - graphic = $("") - .attr("src", "images/poweredby-transparent.gif") - .attr("id", "browndog_img") - .attr("width", "100"); - - var link = $("") - .attr("href", "http://browndog.ncsa.illinois.edu") - .attr("id", "browndog_poweredby") - .css("position", "fixed") - .css("right", "10px") - .css("bottom", "30px") - .append(graphic); - - $("body").append(link); - } -} diff --git a/web/param2json.php b/web/param2json.php index f9eb38acbbb..341edd467de 100644 --- a/web/param2json.php +++ b/web/param2json.php @@ -7,12 +7,12 @@ $row = $stmt->fetch(PDO::FETCH_ASSOC); $stmt->closeCursor(); if 
(array_key_exists('id', $row)) { - $where = "AND workflows.id >= ${row['id']}000000000 AND workflows.id <= ${row['id']}999999999"; + $where = "AND workflows.id >= {$row['id']}000000000 AND workflows.id <= {$row['id']}999999999"; } else { $where = "AND workflows.id >= 99000000000 AND workflows.id <= 99999999999"; } -$query = "SELECT workflows.id, workflows.params, attributes.value FROM workflows LEFT OUTER JOIN attributes ON workflows.id=attributes.container_id AND attributes.container_type='workflows' WHERE params != '' AND value is null ${where} ORDER BY workflows.id DESC"; +$query = "SELECT workflows.id, workflows.params, attributes.value FROM workflows LEFT OUTER JOIN attributes ON workflows.id=attributes.container_id AND attributes.container_type='workflows' WHERE params != '' AND value is null {$where} ORDER BY workflows.id DESC"; $stmt = $pdo->prepare($query); if ($stmt->execute() === FALSE) { die('Invalid query: ' . error_database()); @@ -23,7 +23,7 @@ print "
";
 
 while ($row = $stmt->fetch(PDO::FETCH_ASSOC)) {
-    $params = eval("return ${row['params']};");
+    $params = eval("return {$row['params']};");
     foreach ($ignore_vars as $x) {
         unset($params[$x]);
     }
@@ -36,8 +36,8 @@
       die('Invalid query: ' . error_database());
     }
 
-    $pdo->exec("UPDATE workflows SET params = '' WHERE id = ${row['id']}");
-    print("updated workflow with id=${row['id']}
"); + $pdo->exec("UPDATE workflows SET params = '' WHERE id = {$row['id']}"); + print("updated workflow with id={$row['id']}
"); } print "
"; diff --git a/web/setups/core.php b/web/setups/core.php index f93c98ac100..467c51858df 100644 --- a/web/setups/core.php +++ b/web/setups/core.php @@ -26,7 +26,6 @@ switch ($key) { case 'all': $pattern = '/^\$/i'; break; // not working properly - case 'browndog': $pattern = '/\$browndog*/i'; break; case 'database': $pattern = '/\$db_bety_*/i'; break; case 'fiadb': $pattern = '/\$db_fia_*/i'; break; case 'client_sceret': $pattern = '/\$client_sceret*/i'; break; diff --git a/web/setups/page.template.php b/web/setups/page.template.php index c2e3486c1c9..8ef5f641b48 100644 --- a/web/setups/page.template.php +++ b/web/setups/page.template.php @@ -42,7 +42,6 @@

These are the Admin Pages.

List of available configurations

Database
- Browndog
FIA Database
Change Password

diff --git a/web/workflow.R b/web/workflow.R index 1fab4b6062a..ea5ae7d9510 100755 --- a/web/workflow.R +++ b/web/workflow.R @@ -31,6 +31,10 @@ options(error = quote({ # ---------------------------------------------------------------------- # PEcAn Workflow # ---------------------------------------------------------------------- + +# Report package versions for provenance +PEcAn.all::pecan_version() + # Open and read in settings file for PEcAn run. settings <- PEcAn.settings::read.settings(args$settings)
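The workflow.R hunk above only prints the version report to the console log, so the provenance record disappears with that log. A minimal sketch of one way to persist the report alongside the run outputs — assuming PEcAn.all::pecan_version() returns a data-frame-like table (check the PEcAn.all documentation for the actual return type) and reusing the settings$outdir path that read.settings() populates; the file name pecan_version.csv is illustrative and not part of this patch:

# Hypothetical follow-up, not part of this diff: save the version report
# next to the run outputs so provenance survives after the console log is gone.
# Assumes pecan_version() returns a data frame (verify against PEcAn.all docs).
versions <- PEcAn.all::pecan_version()
utils::write.csv(versions,
                 file = file.path(settings$outdir, "pecan_version.csv"),
                 row.names = FALSE)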