diff --git a/.github/workflows/ci_unit_tests.yaml b/.github/workflows/ci_unit_tests.yaml new file mode 100644 index 00000000000..e22f63bf566 --- /dev/null +++ b/.github/workflows/ci_unit_tests.yaml @@ -0,0 +1,64 @@ +name: CI Unit Tests +on: [pull_request, push, workflow_dispatch] + +jobs: + + ci_pytest: + runs-on: ubuntu-latest + name: Run unit tests on CI system + permissions: + checks: write + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: 3.11.8 + + - name: Install dependencies + run: | + sudo apt-get update + sudo apt-get install -y perl libxml-libxml-perl libxml-libxslt-perl libdatetime-perl + python -m pip install --upgrade pip + pip install pytest + pip install wxflow + pip install wget + + - name: Cache Rocoto Install + uses: actions/cache@v4 + with: + path: ~/rocoto + key: ${{ runner.os }}-rocoto-${{ hashFiles('**/ci_unit_tests.yaml') }} + + - name: Install Rocoto + run: | + if [ ! 
-d "$HOME/rocoto/bin" ]; then + git clone https://github.com/christopherwharrop/rocoto.git $HOME/rocoto + cd $HOME/rocoto + ./INSTALL + fi + echo "$HOME/rocoto/bin" >> $GITHUB_PATH + + - name: Run tests + shell: bash + run: | + sudo mkdir -p /scratch1/NCEPDEV + cd $GITHUB_WORKSPACE/sorc + git submodule update --init --recursive + ./link_workflow.sh + cd $GITHUB_WORKSPACE/ci/scripts/tests + ln -s ../wxflow + + pytest -v --junitxml $GITHUB_WORKSPACE/ci/scripts/tests/test-results.xml + + + - name: Publish Test Results + if: always() + uses: EnricoMi/publish-unit-test-result-action@v2 + with: + files: ci/scripts/tests/test-results.xml + job_summary: true + comment_mode: off diff --git a/ci/Jenkinsfile b/ci/Jenkinsfile index 956bd692dd5..05d38b7898c 100644 --- a/ci/Jenkinsfile +++ b/ci/Jenkinsfile @@ -14,7 +14,7 @@ pipeline { options { skipDefaultCheckout() - //parallelsAlwaysFailFast() + parallelsAlwaysFailFast() } stages { // This initial stage is used to get the Machine name from the GitHub labels on the PR @@ -90,9 +90,6 @@ pipeline { stage('3. 
Build System') { matrix { agent { label NodeName[machine].toLowerCase() } - //options { - // throttle(['global_matrix_build']) - //} axes { axis { name 'system' @@ -102,6 +99,7 @@ pipeline { stages { stage('build system') { steps { + catchError(buildResult: 'UNSTABLE', stageResult: 'FAILURE') { script { def HOMEgfs = "${CUSTOM_WORKSPACE}/${system}" // local HOMEgfs is used to build the system on per system basis under the custome workspace for each buile system sh(script: "mkdir -p ${HOMEgfs}") @@ -120,8 +118,8 @@ pipeline { if (env.CHANGE_ID) { sh(script: """${GH} pr comment ${env.CHANGE_ID} --repo ${repo_url} --body "Checkout **Failed** on ${Machine}: ${e.getMessage()}" """) } - echo "Failed to checkout: ${e.getMessage()}" STATUS = 'Failed' + error("Failed to checkout: ${e.getMessage()}") } def gist_url = "" def error_logs = "" @@ -155,6 +153,7 @@ pipeline { } catch (Exception error_comment) { echo "Failed to comment on PR: ${error_comment.getMessage()}" } + STATUS = 'Failed' error("Failed to build system on ${Machine}") } } @@ -174,6 +173,7 @@ pipeline { } } } + } } } } @@ -181,7 +181,9 @@ pipeline { } stage('4. 
Run Tests') { - failFast false + when { + expression { STATUS != 'Failed' } + } matrix { agent { label NodeName[machine].toLowerCase() } axes { @@ -198,14 +200,21 @@ expression { return caseList.contains(Case) } } steps { + catchError(buildResult: 'UNSTABLE', stageResult: 'FAILURE') { script { sh(script: "sed -n '/{.*}/!p' ${CUSTOM_WORKSPACE}/gfs/ci/cases/pr/${Case}.yaml > ${CUSTOM_WORKSPACE}/gfs/ci/cases/pr/${Case}.yaml.tmp") def yaml_case = readYaml file: "${CUSTOM_WORKSPACE}/gfs/ci/cases/pr/${Case}.yaml.tmp" system = yaml_case.experiment.system def HOMEgfs = "${CUSTOM_WORKSPACE}/${system}" // local HOMEgfs is used to populate the XML on per system basis env.RUNTESTS = "${CUSTOM_WORKSPACE}/RUNTESTS" - sh(script: "${HOMEgfs}/ci/scripts/utils/ci_utils_wrapper.sh create_experiment ${HOMEgfs}/ci/cases/pr/${Case}.yaml") + try { + error_output = sh(script: "${HOMEgfs}/ci/scripts/utils/ci_utils_wrapper.sh create_experiment ${HOMEgfs}/ci/cases/pr/${Case}.yaml", returnStdout: true).trim() + } catch (Exception error_create) { + sh(script: """${GH} pr comment ${env.CHANGE_ID} --repo ${repo_url} --body "${Case} **FAILED** to create experiment on ${Machine}\n with the error:\n\\`\\`\\`\n${error_output}\\`\\`\\`" """) + error("Case ${Case} failed to create experiment directory") + } } + } } } @@ -213,7 +222,6 @@ when { expression { return caseList.contains(Case) } } - failFast false steps { script { HOMEgfs = "${CUSTOM_WORKSPACE}/gfs" // common HOMEgfs is used to launch the scripts that run the experiments @@ -255,11 +263,11 @@ STATUS = 'Failed' try { sh(script: """${GH} pr edit ${env.CHANGE_ID} --repo ${repo_url} --remove-label "CI-${Machine}-Running" --add-label "CI-${Machine}-${STATUS}" """, returnStatus: true) - sh(script: """${GH} pr comment ${env.CHANGE_ID} --repo 
${repo_url} --body "Experiment ${Case} **FAILED** on ${Machine} in\n\\`${CUSTOM_WORKSPACE}/RUNTESTS/EXPDIR/${pslot}\\`" """) } catch (Exception e) { echo "Failed to update label from Running to ${STATUS}: ${e.getMessage()}" } - error("Failed to run experiments ${Case} on ${Machine}") + echo "Failed to run experiments ${Case} on ${Machine}" } } } @@ -268,6 +276,7 @@ pipeline { } } } + stage( '5. FINALIZE' ) { agent { label NodeName[machine].toLowerCase() } steps { diff --git a/ci/cases/yamls/gefs_ci_defaults.yaml b/ci/cases/yamls/gefs_ci_defaults.yaml index ceb36d4acb7..05a97edd900 100644 --- a/ci/cases/yamls/gefs_ci_defaults.yaml +++ b/ci/cases/yamls/gefs_ci_defaults.yaml @@ -1,4 +1,4 @@ defaults: !INC {{ HOMEgfs }}/parm/config/gefs/yaml/defaults.yaml base: - HPC_ACCOUNT: {{ 'HPC_ACCOUNT' | getenv }} + ACCOUNT: {{ 'HPC_ACCOUNT' | getenv }} diff --git a/ci/scripts/tests/test_create_experiment.py b/ci/scripts/tests/test_create_experiment.py new file mode 100644 index 00000000000..03f3a30805d --- /dev/null +++ b/ci/scripts/tests/test_create_experiment.py @@ -0,0 +1,29 @@ +from wxflow import Executable +from shutil import rmtree +import os +import copy + +_here = os.path.dirname(__file__) +HOMEgfs = os.sep.join(_here.split(os.sep)[:-3]) +RUNDIR = os.path.join(_here, 'testdata/RUNDIR') + + +def test_create_experiment(): + + create_experiment_script = Executable(f'{HOMEgfs}/workflow/create_experiment.py') + yaml_dir = yaml_dir = os.path.join(HOMEgfs, 'ci/cases/pr') + env = os.environ.copy() + env['RUNTESTS'] = RUNDIR + + for case in os.listdir(yaml_dir): + if case.endswith('.yaml'): + with open(os.path.join(yaml_dir, case), 'r') as file: + file_contents = file.read() + if 'ICSDIR_ROOT' not in file_contents: + create_experiment = copy.deepcopy(create_experiment_script) + create_experiment.add_default_arg(['-y', f'../../cases/pr/{case}', '--overwrite']) + env['pslot'] = os.path.splitext(case)[0] + create_experiment(env=env) + assert (create_experiment.returncode == 0) + + 
rmtree(RUNDIR) diff --git a/ci/scripts/tests/test_rocotostat.py b/ci/scripts/tests/test_rocotostat.py new file mode 100755 index 00000000000..f43f8df2f84 --- /dev/null +++ b/ci/scripts/tests/test_rocotostat.py @@ -0,0 +1,90 @@ +import sys +import os +from shutil import rmtree +import wget + +script_dir = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(os.path.join(os.path.dirname(script_dir), 'utils')) + +from rocotostat import rocoto_statcount, rocotostat_summary, is_done, is_stalled, CommandNotFoundError +from wxflow import which + +test_data_url = 'https://noaa-nws-global-pds.s3.amazonaws.com/data/CI/' + +testdata_path = 'testdata/rocotostat' +testdata_full_path = os.path.join(script_dir, testdata_path) + + +if not os.path.isfile(os.path.join(testdata_full_path, 'database.db')): + os.makedirs(testdata_full_path, exist_ok=True) + workflow_url = test_data_url + str(testdata_path) + '/workflow.xml' + workflow_destination = os.path.join(testdata_full_path, 'workflow.xml') + wget.download(workflow_url, workflow_destination) + + database_url = test_data_url + str(testdata_path) + '/database.db' + database_destination = os.path.join(testdata_full_path, 'database.db') + wget.download(database_url, database_destination) + +try: + rocotostat = which('rocotostat') +except CommandNotFoundError: + raise CommandNotFoundError("rocotostat not found in PATH") + +rocotostat.add_default_arg(['-w', os.path.join(testdata_path, 'workflow.xml'), '-d', os.path.join(testdata_path, 'database.db')]) + + +def test_rocoto_statcount(): + + result = rocoto_statcount(rocotostat) + + assert result['SUCCEEDED'] == 20 + assert result['FAIL'] == 0 + assert result['DEAD'] == 0 + assert result['RUNNING'] == 0 + assert result['SUBMITTING'] == 0 + assert result['QUEUED'] == 0 + + +def test_rocoto_summary(): + + result = rocotostat_summary(rocotostat) + + assert result['CYCLES_TOTAL'] == 1 + assert result['CYCLES_DONE'] == 1 + + +def test_rocoto_done(): + + result = 
rocotostat_summary(rocotostat) + + assert is_done(result) + + rmtree(testdata_full_path) + + +def test_rocoto_stalled(): + testdata_path = 'testdata/rocotostat_stalled' + testdata_full_path = os.path.join(script_dir, testdata_path) + xml = os.path.join(testdata_full_path, 'stalled.xml') + db = os.path.join(testdata_full_path, 'stalled.db') + + if not os.path.isfile(os.path.join(testdata_full_path, 'stalled.db')): + os.makedirs(testdata_full_path, exist_ok=True) + workflow_url = test_data_url + str(testdata_path) + '/stalled.xml' + database_url = test_data_url + str(testdata_path) + '/stalled.db' + + workflow_destination = os.path.join(testdata_full_path, 'stalled.xml') + wget.download(workflow_url, workflow_destination) + + database_destination = os.path.join(testdata_full_path, 'stalled.db') + wget.download(database_url, database_destination) + + rocotostat = which('rocotostat') + rocotostat.add_default_arg(['-w', xml, '-d', db]) + + result = rocoto_statcount(rocotostat) + + assert result['SUCCEEDED'] == 11 + assert is_stalled(result) + + rmtree(testdata_full_path) diff --git a/ci/scripts/tests/test_setup.py b/ci/scripts/tests/test_setup.py new file mode 100755 index 00000000000..77a36369f46 --- /dev/null +++ b/ci/scripts/tests/test_setup.py @@ -0,0 +1,89 @@ +from wxflow import Executable, Configuration, ProcessError +from shutil import rmtree +import pytest +import os + +_here = os.path.dirname(__file__) +HOMEgfs = os.sep.join(_here.split(os.sep)[:-3]) +RUNDIR = os.path.join(_here, 'testdata/RUNDIR') +pslot = "C48_ATM" +account = "fv3-cpu" +foobar = "foobar" + + +def test_setup_expt(): + + arguments = [ + "gfs", "forecast-only", + "--pslot", pslot, "--app", "ATM", "--resdetatmos", "48", + "--comroot", f"{RUNDIR}", "--expdir", f"{RUNDIR}", + "--idate", "2021032312", "--edate", "2021032312", "--overwrite" + ] + setup_expt_script = Executable(os.path.join(HOMEgfs, "workflow", "setup_expt.py")) + setup_expt_script.add_default_arg(arguments) + setup_expt_script() + 
assert (setup_expt_script.returncode == 0) + + +def test_setup_xml(): + + setup_xml_script = Executable(os.path.join(HOMEgfs, "workflow/setup_xml.py")) + setup_xml_script.add_default_arg(f"{RUNDIR}/{pslot}") + setup_xml_script() + assert (setup_xml_script.returncode == 0) + + cfg = Configuration(f"{RUNDIR}/{pslot}") + base = cfg.parse_config('config.base') + assert base.ACCOUNT == account + + assert "UNKNOWN" not in base.values() + + with open(f"{RUNDIR}/{pslot}/{pslot}.xml", 'r') as file: + contents = file.read() + assert contents.count(account) > 5 + + rmtree(RUNDIR) + + +def test_setup_xml_fail_config_env_cornercase(): + + script_content = ('''#!/usr/bin/env bash +export HOMEgfs=foobar +../../../workflow/setup_xml.py "${1}"\n +''') + + with open('run_setup_xml.sh', 'w') as file: + file.write(script_content) + os.chmod('run_setup_xml.sh', 0o755) + + try: + setup_xml_script = Executable(os.path.join(HOMEgfs, "ci", "scripts", "tests", "run_setup_xml.sh")) + setup_xml_script.add_default_arg(f"{RUNDIR}/{pslot}") + setup_xml_script() + assert (setup_xml_script.returncode == 0) + + cfg = Configuration(f"{RUNDIR}/{pslot}") + base = cfg.parse_config('config.base') + assert base.ACCOUNT == account + + assert foobar not in base.values() + assert "UNKNOWN" not in base.values() + + with open(f"{RUNDIR}/{pslot}/{pslot}.xml", 'r') as file: + contents = file.read() + assert contents.count(account) > 5 + + except ProcessError as e: + # We expect this to fail because ACCOUNT=fv3-cpu in config.base and environment + pass + except Exception as e: + # If an unexpected exception occurs, fail the test with a custom message + pytest.fail(f"Unexpected exception occurred: {e}") + + finally: + # Cleanup code to ensure it runs regardless of test outcome + os.remove('run_setup_xml.sh') + try: + rmtree(RUNDIR) + except FileNotFoundError: + pass diff --git a/ci/scripts/utils/publish_logs.py b/ci/scripts/utils/publish_logs.py index 7768c17c100..283c84a8d1c 100755 --- a/ci/scripts/utils/publish_logs.py +++ 
b/ci/scripts/utils/publish_logs.py @@ -46,7 +46,8 @@ def add_logs_to_gist(args, emcbot_gh): gist_files = {} for file in args.file: - file_content = file.read() + with open(file.name, 'r', encoding='latin-1') as file: + file_content = file.read() gist_files[os.path.basename(file.name)] = emcbot_gh.InputFileContent(file_content) gist = emcbot_gh.user.create_gist(public=True, files=gist_files, description=f"error log file from CI run {args.gist[0]}") @@ -85,7 +86,8 @@ def upload_logs_to_repo(args, emcbot_gh, emcbot_ci_url): break for file in args.file: - file_content = file.read() + with open(file.name, 'r', encoding='latin-1') as file: + file_content = file.read() file_path_in_repo = f"{repo_path}/{path_header}/" + str(os.path.basename(file.name)) emcbot_gh.repo.create_file(file_path_in_repo, "Adding error log file", file_content, branch="error_logs") diff --git a/ci/scripts/utils/rocotostat.py b/ci/scripts/utils/rocotostat.py index 9b1d8dcc3af..70c672f0e8e 100755 --- a/ci/scripts/utils/rocotostat.py +++ b/ci/scripts/utils/rocotostat.py @@ -14,6 +14,35 @@ def attempt_multiple_times(expression, max_attempts, sleep_duration=0, exception_class=Exception): + """ + Retries a function multiple times. + + Try to execute the function expression up to max_attempts times ignoring any exceptions + of the type exception_class, It waits for sleep_duration seconds between attempts. + + Parameters + ---------- + expression : callable + The function to be executed. + max_attempts : int + The maximum number of attempts to execute the function. + sleep_duration : int, optional + The number of seconds to wait between attempts. Default is 0. + exception_class : Exception, optional + The type of exception to catch. Default is the base Exception class, catching all exceptions. + + Returns + ------- + The return value of the function expression. + + Raises + ------ + exception_class + If the function expression raises an exception of type exception_class + in all max_attempts attempts. 
+ + """ + attempt = 0 last_exception = None while attempt < max_attempts: @@ -189,7 +218,7 @@ def is_stalled(rocoto_status): error_return = rocoto_status['UNKNOWN'] rocoto_state = 'UNKNOWN' elif is_stalled(rocoto_status): - rocoto_status = attempt_multiple_times(rocoto_statcount(rocotostat), 2, 120, ProcessError) + rocoto_status = attempt_multiple_times(lambda: rocoto_statcount(rocotostat), 2, 120, ProcessError) if is_stalled(rocoto_status): error_return = 3 rocoto_state = 'STALLED' diff --git a/ecf/scripts/gdas/atmos/gempak/jgdas_atmos_gempak.ecf b/ecf/scripts/gdas/atmos/gempak/jgdas_atmos_gempak.ecf index 0bc2d764553..754d921f956 100755 --- a/ecf/scripts/gdas/atmos/gempak/jgdas_atmos_gempak.ecf +++ b/ecf/scripts/gdas/atmos/gempak/jgdas_atmos_gempak.ecf @@ -1,5 +1,5 @@ #PBS -S /bin/bash -#PBS -N %RUN%_atmos_gempak_%CYC% +#PBS -N %RUN%_atmos_gempak_%FHR3%_%CYC% #PBS -j oe #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% @@ -31,9 +31,13 @@ module load gempak/${gempak_ver} module list +############################################################# +# environment settings +############################################################# export cyc=%CYC% export cycle=t%CYC%z export USE_CFP=YES +export FHR3=%FHR3% ############################################################ # CALL executable job script here diff --git a/ecf/scripts/gfs/atmos/gempak/jgfs_atmos_gempak.ecf b/ecf/scripts/gfs/atmos/gempak/jgfs_atmos_gempak.ecf index 1994f238d14..e01fa35e57e 100755 --- a/ecf/scripts/gfs/atmos/gempak/jgfs_atmos_gempak.ecf +++ b/ecf/scripts/gfs/atmos/gempak/jgfs_atmos_gempak.ecf @@ -1,9 +1,9 @@ #PBS -S /bin/bash -#PBS -N %RUN%_atmos_gempak_%CYC% +#PBS -N %RUN%_atmos_gempak_%FHR3%_%CYC% #PBS -j oe #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% -#PBS -l walltime=03:00:00 +#PBS -l walltime=00:30:00 #PBS -l select=1:ncpus=28:mpiprocs=28:mem=2GB #PBS -l place=vscatter:shared #PBS -l debug=true @@ -37,6 +37,7 @@ module list ############################################################# export 
cyc=%CYC% export cycle=t%CYC%z +export FHR3=%FHR3% ############################################################ # CALL executable job script here diff --git a/ecf/scripts/gfs/atmos/gempak/jgfs_atmos_pgrb2_spec_gempak.ecf b/ecf/scripts/gfs/atmos/gempak/jgfs_atmos_pgrb2_spec_gempak.ecf index 04b07c58d14..df53868b054 100755 --- a/ecf/scripts/gfs/atmos/gempak/jgfs_atmos_pgrb2_spec_gempak.ecf +++ b/ecf/scripts/gfs/atmos/gempak/jgfs_atmos_pgrb2_spec_gempak.ecf @@ -1,9 +1,9 @@ #PBS -S /bin/bash -#PBS -N %RUN%_atmos_pgrb2_spec_gempak_%CYC% +#PBS -N %RUN%_atmos_pgrb2_spec_gempak_%FHR3%_%CYC% #PBS -j oe #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% -#PBS -l walltime=00:30:00 +#PBS -l walltime=00:15:00 #PBS -l select=1:ncpus=1:mem=1GB #PBS -l place=vscatter:shared #PBS -l debug=true @@ -35,6 +35,7 @@ module list ############################################################# export cyc=%CYC% export cycle=t%CYC%z +export FHR3=%FHR3% ############################################################ # CALL executable job script here diff --git a/env/AWSPW.env b/env/AWSPW.env index 54f2643b1aa..751f52db418 100755 --- a/env/AWSPW.env +++ b/env/AWSPW.env @@ -10,7 +10,7 @@ fi step=$1 export launcher="srun --mpi=pmi2 -l" -export mpmd_opt="--distribution=block:block --hint=nomultithread --cpus-per-task=1" +export mpmd_opt="" # Configure MPI environment export OMP_STACKSIZE=2048000 @@ -29,7 +29,7 @@ if [[ "${step}" = "fcst" ]] || [[ "${step}" = "efcs" ]]; then (( nnodes = (${!nprocs}+${!ppn}-1)/${!ppn} )) (( ntasks = nnodes*${!ppn} )) # With ESMF threading, the model wants to use the full node - export APRUN_UFS="${launcher} -n ${ntasks} ${mpmd_opt}" + export APRUN_UFS="${launcher} -n ${ntasks}" unset nprocs ppn nnodes ntasks elif [[ "${step}" = "post" ]]; then diff --git a/env/HERA.env b/env/HERA.env index db63f0bfa58..b743a19a624 100755 --- a/env/HERA.env +++ b/env/HERA.env @@ -140,13 +140,13 @@ elif [[ "${step}" = "ocnanalecen" ]]; then [[ ${NTHREADS_OCNANALECEN} -gt ${nth_max} ]] && export 
NTHREADS_OCNANALECEN=${nth_max} export APRUN_OCNANALECEN="${launcher} -n ${npe_ocnanalecen} --cpus-per-task=${NTHREADS_OCNANALECEN}" -elif [[ "${step}" = "ocnanalletkf" ]]; then +elif [[ "${step}" = "marineanalletkf" ]]; then - nth_max=$((npe_node_max / npe_node_ocnanalletkf)) + nth_max=$((npe_node_max / npe_node_marineanalletkf)) - export NTHREADS_OCNANALLETKF=${nth_ocnanalletkf:-${nth_max}} - [[ ${NTHREADS_OCNANALLETKF} -gt ${nth_max} ]] && export NTHREADS_OCNANALLETKF=${nth_max} - export APRUN_OCNANALLETKF="${launcher} -n ${npe_ocnanalletkf} --cpus-per-task=${NTHREADS_OCNANALLETKF}" + export NTHREADS_MARINEANALLETKF=${nth_marineanalletkf:-${nth_max}} + [[ ${NTHREADS_MARINEANALLETKF} -gt ${nth_max} ]] && export NTHREADS_MARINEANALLETKF=${nth_max} + export APRUN_MARINEANALLETKF="${launcher} -n ${npe_marineanalletkf} --cpus-per-task=${NTHREADS_MARINEANALLETKF}" elif [[ "${step}" = "anal" ]] || [[ "${step}" = "analcalc" ]]; then diff --git a/env/ORION.env b/env/ORION.env index 502e99e1928..c203acae485 100755 --- a/env/ORION.env +++ b/env/ORION.env @@ -148,13 +148,13 @@ elif [[ "${step}" = "ocnanalecen" ]]; then [[ ${NTHREADS_OCNANALECEN} -gt ${nth_max} ]] && export NTHREADS_OCNANALECEN=${nth_max} export APRUN_OCNANALECEN="${launcher} -n ${npe_ocnanalecen} --cpus-per-task=${NTHREADS_OCNANALECEN}" -elif [[ "${step}" = "ocnanalletkf" ]]; then +elif [[ "${step}" = "marineanalletkf" ]]; then - nth_max=$((npe_node_max / npe_node_ocnanalletkf)) + nth_max=$((npe_node_max / npe_node_marineanalletkf)) - export NTHREADS_OCNANALLETKF=${nth_ocnanalletkf:-${nth_max}} - [[ ${NTHREADS_OCNANALLETKF} -gt ${nth_max} ]] && export NTHREADS_OCNANALLETKF=${nth_max} - export APRUN_OCNANALLETKF="${launcher} -n ${npe_ocnanalletkf} --cpus-per-task=${NTHREADS_OCNANALLETKF}" + export NTHREADS_MARINEANALLETKF=${nth_marineanalletkf:-${nth_max}} + [[ ${NTHREADS_MARINEANALLETKF} -gt ${nth_max} ]] && export NTHREADS_MARINEANALLETKF=${nth_max} + export APRUN_MARINEANALLETKF="${launcher} -n 
${npe_marineanalletkf} --cpus-per-task=${NTHREADS_MARINEANALLETKF}" elif [[ "${step}" = "anal" ]] || [[ "${step}" = "analcalc" ]]; then diff --git a/jobs/JGDAS_ATMOS_GEMPAK b/jobs/JGDAS_ATMOS_GEMPAK index 3a9c8e0a9c1..f5c00b9c989 100755 --- a/jobs/JGDAS_ATMOS_GEMPAK +++ b/jobs/JGDAS_ATMOS_GEMPAK @@ -3,27 +3,17 @@ source "${HOMEgfs}/ush/preamble.sh" source "${HOMEgfs}/ush/jjob_header.sh" -e "gempak" -c "base gempak" -# TODO (#1219) This j-job is not part of the rocoto suite - ############################################ # Set up model and cycle specific variables ############################################ - -export fend=09 -export finc=3 -export fstart=00 -export GRIB=pgrb2f +export model=${model:-gdas} +export GRIB=${GRIB:-pgrb2f} export EXT="" -export DBN_ALERT_TYPE=GDAS_GEMPAK +export DBN_ALERT_TYPE=${DBN_ALERT_TYPE:-GDAS_GEMPAK} export SENDDBN=${SENDDBN:-NO} export DBNROOT=${DBNROOT:-${UTILROOT}/fakedbn} -################################### -# Specify NET and RUN Name and model -#################################### -export model=${model:-gdas} - ############################################## # Define COM directories ############################################## @@ -40,26 +30,18 @@ for grid in 0p25 1p00; do fi done -# TODO: These actions belong in an ex-script not a j-job -if [[ -f poescript ]]; then - rm -f poescript -fi +######################################################## +# Execute the script for one degree grib +######################################################## -{ - ######################################################## - # Execute the script. 
- echo "${SCRgfs}/exgdas_atmos_nawips.sh 1p00 009 GDAS_GEMPAK ${COM_ATMOS_GEMPAK_1p00}" - ######################################################## +"${SCRgfs}/exgdas_atmos_nawips.sh" "1p00" "${FHR3}" "GDAS_GEMPAK" "${COM_ATMOS_GEMPAK_1p00}" - ######################################################## - # Execute the script for quater-degree grib - echo "${SCRgfs}/exgdas_atmos_nawips.sh 0p25 009 GDAS_GEMPAK ${COM_ATMOS_GEMPAK_0p25}" - ######################################################## -} > poescript +######################################################## +# Execute the script for quater-degree grib +######################################################## -cat poescript +"${SCRgfs}/exgdas_atmos_nawips.sh" "0p25" "${FHR3}" "GDAS_GEMPAK" "${COM_ATMOS_GEMPAK_0p25}" -"${HOMEgfs}/ush/run_mpmd.sh" poescript export err=$?; err_chk ############################################ diff --git a/jobs/JGFS_ATMOS_GEMPAK b/jobs/JGFS_ATMOS_GEMPAK index ab65cc3bcf2..9988378fe54 100755 --- a/jobs/JGFS_ATMOS_GEMPAK +++ b/jobs/JGFS_ATMOS_GEMPAK @@ -6,27 +6,20 @@ source "${HOMEgfs}/ush/jjob_header.sh" -e "gempak" -c "base gempak" ############################################ # Set up model and cycle specific variables ############################################ -export finc=${finc:-3} -export fstart=${fstart:-0} export model=${model:-gfs} export GRIB=${GRIB:-pgrb2f} export EXT="" export DBN_ALERT_TYPE=${DBN_ALERT_TYPE:-GFS_GEMPAK} +export SENDDBN=${SENDDBN:-NO} +export DBNROOT=${DBNROOT:-${UTILROOT}/fakedbn} + # For half-degree P Grib files export DO_HD_PGRB=${DO_HD_PGRB:-YES} -################################### -# Specify NET and RUN Name and model -#################################### -export model=${model:-gfs} - ############################################## # Define COM directories ############################################## -export SENDDBN=${SENDDBN:-NO} -export DBNROOT=${DBNROOT:-${UTILROOT}/fakedbn} - for grid in 0p25 0p50 1p00; do GRID=${grid} YMD=${PDY} HH=${cyc} 
declare_from_tmpl -rx "COM_ATMOS_GRIB_${grid}:COM_ATMOS_GRIB_GRID_TMPL" done @@ -40,73 +33,100 @@ for grid in 1p00 0p50 0p25 40km 35km_atl 35km_pac; do fi done -# TODO: These actions belong in an ex-script not a j-job -if [[ -f poescript ]]; then - rm -f poescript -fi - +fhr=10#${FHR3} ocean_domain_max=180 if (( ocean_domain_max > FHMAX_GFS )); then ocean_domain_max=${FHMAX_GFS} fi -{ - ################################################################# - # Execute the script for the 384 hour 1 degree grib - ################################################################## - echo "time ${SCRgfs}/exgfs_atmos_nawips.sh 1p00 ${FHMAX_GFS} GFS_GEMPAK ${COM_ATMOS_GEMPAK_1p00}" - echo "time ${SCRgfs}/exgfs_atmos_nawips.sh 1p00 ${FHMAX_GFS} GFS_GEMPAK ${COM_ATMOS_GEMPAK_1p00}" - echo "time ${SCRgfs}/exgfs_atmos_nawips.sh 1p00 ${FHMAX_GFS} GFS_GEMPAK ${COM_ATMOS_GEMPAK_1p00}" - echo "time ${SCRgfs}/exgfs_atmos_nawips.sh 1p00 ${FHMAX_GFS} GFS_GEMPAK ${COM_ATMOS_GEMPAK_1p00}" - echo "time ${SCRgfs}/exgfs_atmos_nawips.sh 1p00 ${FHMAX_GFS} GFS_GEMPAK ${COM_ATMOS_GEMPAK_1p00}" - echo "time ${SCRgfs}/exgfs_atmos_nawips.sh 1p00 ${FHMAX_GFS} GFS_GEMPAK ${COM_ATMOS_GEMPAK_1p00}" - - ################################################################# - # Execute the script for the half-degree grib - ################################################################## - echo "time ${SCRgfs}/exgfs_atmos_nawips.sh 0p50 ${FHMAX_GFS} GFS_GEMPAK ${COM_ATMOS_GEMPAK_0p50}" - echo "time ${SCRgfs}/exgfs_atmos_nawips.sh 0p50 ${FHMAX_GFS} GFS_GEMPAK ${COM_ATMOS_GEMPAK_0p50}" - echo "time ${SCRgfs}/exgfs_atmos_nawips.sh 0p50 ${FHMAX_GFS} GFS_GEMPAK ${COM_ATMOS_GEMPAK_0p50}" - echo "time ${SCRgfs}/exgfs_atmos_nawips.sh 0p50 ${FHMAX_GFS} GFS_GEMPAK ${COM_ATMOS_GEMPAK_0p50}" - echo "time ${SCRgfs}/exgfs_atmos_nawips.sh 0p50 ${FHMAX_GFS} GFS_GEMPAK ${COM_ATMOS_GEMPAK_0p50}" - echo "time ${SCRgfs}/exgfs_atmos_nawips.sh 0p50 ${FHMAX_GFS} GFS_GEMPAK ${COM_ATMOS_GEMPAK_0p50}" - - 
################################################################# - # Execute the script for the quater-degree grib - #################################################################### - echo "time ${SCRgfs}/exgfs_atmos_nawips.sh 0p25 ${FHMAX_GFS} GFS_GEMPAK ${COM_ATMOS_GEMPAK_0p25}" - echo "time ${SCRgfs}/exgfs_atmos_nawips.sh 0p25 ${FHMAX_GFS} GFS_GEMPAK ${COM_ATMOS_GEMPAK_0p25}" - echo "time ${SCRgfs}/exgfs_atmos_nawips.sh 0p25 ${FHMAX_GFS} GFS_GEMPAK ${COM_ATMOS_GEMPAK_0p25}" - echo "time ${SCRgfs}/exgfs_atmos_nawips.sh 0p25 ${FHMAX_GFS} GFS_GEMPAK ${COM_ATMOS_GEMPAK_0p25}" - echo "time ${SCRgfs}/exgfs_atmos_nawips.sh 0p25 ${FHMAX_GFS} GFS_GEMPAK ${COM_ATMOS_GEMPAK_0p25}" - echo "time ${SCRgfs}/exgfs_atmos_nawips.sh 0p25 ${FHMAX_GFS} GFS_GEMPAK ${COM_ATMOS_GEMPAK_0p25}" - echo "time ${SCRgfs}/exgfs_atmos_nawips.sh 0p25 ${FHMAX_GFS} GFS_GEMPAK ${COM_ATMOS_GEMPAK_0p25}" - echo "time ${SCRgfs}/exgfs_atmos_nawips.sh 0p25 ${FHMAX_GFS} GFS_GEMPAK ${COM_ATMOS_GEMPAK_0p25}" - echo "time ${SCRgfs}/exgfs_atmos_nawips.sh 0p25 ${FHMAX_GFS} GFS_GEMPAK ${COM_ATMOS_GEMPAK_0p25}" - echo "time ${SCRgfs}/exgfs_atmos_nawips.sh 0p25 ${FHMAX_GFS} GFS_GEMPAK ${COM_ATMOS_GEMPAK_0p25}" - - #################################################################### - # Execute the script to create the 35km Pacific grids for OPC - ##################################################################### - echo "time ${SCRgfs}/exgfs_atmos_nawips.sh 35km_pac ${ocean_domain_max} GFS_GEMPAK_WWB ${COM_ATMOS_GEMPAK_35km_pac}" - echo "time ${SCRgfs}/exgfs_atmos_nawips.sh 35km_pac ${ocean_domain_max} GFS_GEMPAK_WWB ${COM_ATMOS_GEMPAK_35km_pac}" - - #################################################################### - # Execute the script to create the 35km Atlantic grids for OPC - ##################################################################### - echo "time ${SCRgfs}/exgfs_atmos_nawips.sh 35km_atl ${ocean_domain_max} GFS_GEMPAK_WWB ${COM_ATMOS_GEMPAK_35km_atl}" - echo "time 
${SCRgfs}/exgfs_atmos_nawips.sh 35km_atl ${ocean_domain_max} GFS_GEMPAK_WWB ${COM_ATMOS_GEMPAK_35km_atl}" - - ##################################################################### - # Execute the script to create the 40km grids for HPC - ###################################################################### - echo "time ${SCRgfs}/exgfs_atmos_nawips.sh 40km ${ocean_domain_max} GFS_GEMPAK_WWB ${COM_ATMOS_GEMPAK_40km}" - echo "time ${SCRgfs}/exgfs_atmos_nawips.sh 40km ${ocean_domain_max} GFS_GEMPAK_WWB ${COM_ATMOS_GEMPAK_40km}" -} > poescript - -cat poescript - -"${HOMEgfs}/ush/run_mpmd.sh" poescript +################################################################# +# Execute the script for the 384 hour 1 degree grib +################################################################## +fhmin=0 +fhmax=240 +if (( fhr >= fhmin && fhr <= fhmax )); then + if ((fhr % 3 == 0)); then + "${SCRgfs}/exgfs_atmos_nawips.sh" "1p00" "${FHR3}" "GFS_GEMPAK" "${COM_ATMOS_GEMPAK_1p00}" + fi +fi + +fhmin=252 +fhmax=384 +if (( fhr >= fhmin && fhr <= fhmax )); then + if ((fhr % 12 == 0)); then + "${SCRgfs}/exgfs_atmos_nawips.sh" "1p00" "${FHR3}" "GFS_GEMPAK" "${COM_ATMOS_GEMPAK_1p00}" + fi +fi + +################################################################# +# Execute the script for the half-degree grib +################################################################## +fhmin=0 +fhmax=240 +if (( fhr >= fhmin && fhr <= fhmax )); then + if ((fhr % 3 == 0)); then + "${SCRgfs}/exgfs_atmos_nawips.sh" "0p50" "${FHR3}" "GFS_GEMPAK" "${COM_ATMOS_GEMPAK_0p50}" + fi +fi + +fhmin=246 +fhmax=276 +if (( fhr >= fhmin && fhr <= fhmax )); then + if ((fhr % 6 == 0)); then + "${SCRgfs}/exgfs_atmos_nawips.sh" "0p50" "${FHR3}" "GFS_GEMPAK" "${COM_ATMOS_GEMPAK_0p50}" + fi +fi + +fhmin=288 +fhmax=384 +if (( fhr >= fhmin && fhr <= fhmax )); then + if ((fhr % 12 == 0)); then + "${SCRgfs}/exgfs_atmos_nawips.sh" "0p50" "${FHR3}" "GFS_GEMPAK" "${COM_ATMOS_GEMPAK_0p50}" + fi +fi + 
+################################################################# +# Execute the script for the quater-degree grib +#################################################################### +fhmin=0 +fhmax=120 +if (( fhr >= fhmin && fhr <= fhmax )); then + if ((fhr % 1 == 0)); then + "${SCRgfs}/exgfs_atmos_nawips.sh" "0p25" "${FHR3}" "GFS_GEMPAK" "${COM_ATMOS_GEMPAK_0p25}" + fi +fi + +fhmin=123 +fhmax=240 +if (( fhr >= fhmin && fhr <= fhmax )); then + if ((fhr % 3 == 0)); then + "${SCRgfs}/exgfs_atmos_nawips.sh" "0p25" "${FHR3}" "GFS_GEMPAK" "${COM_ATMOS_GEMPAK_0p25}" + fi +fi + +fhmin=252 +fhmax=384 +if (( fhr >= fhmin && fhr <= fhmax )); then + if ((fhr % 12 == 0)); then + "${SCRgfs}/exgfs_atmos_nawips.sh" "0p25" "${FHR3}" "GFS_GEMPAK" "${COM_ATMOS_GEMPAK_0p25}" + fi +fi + +#################################################################### +# Execute the script to create the 35km and 40km grids +##################################################################### +fhmin=0 +fhmax="${ocean_domain_max}" +if (( fhr >= fhmin && fhr <= fhmax )); then + if ((fhr % 3 == 0)); then + "${SCRgfs}/exgfs_atmos_nawips.sh" "35km_pac" "${FHR3}" "GFS_GEMPAK_WWB" "${COM_ATMOS_GEMPAK_35km_pac}" + + "${SCRgfs}/exgfs_atmos_nawips.sh" "35km_atl" "${FHR3}" "GFS_GEMPAK_WWB" "${COM_ATMOS_GEMPAK_35km_atl}" + + "${SCRgfs}/exgfs_atmos_nawips.sh" "40km" "${FHR3}" "GFS_GEMPAK_WWB" "${COM_ATMOS_GEMPAK_40km}" + fi +fi + export err=$?; err_chk ############################################ diff --git a/jobs/JGFS_ATMOS_GEMPAK_PGRB2_SPEC b/jobs/JGFS_ATMOS_GEMPAK_PGRB2_SPEC index 582dde2f65c..0be52c0d19b 100755 --- a/jobs/JGFS_ATMOS_GEMPAK_PGRB2_SPEC +++ b/jobs/JGFS_ATMOS_GEMPAK_PGRB2_SPEC @@ -1,21 +1,17 @@ #! 
/usr/bin/env bash -############################################ -# GFS_PGRB2_SPEC_GEMPAK PRODUCT GENERATION -############################################ source "${HOMEgfs}/ush/preamble.sh" source "${HOMEgfs}/ush/jjob_header.sh" -e "gempak_spec" -c "base" -################################### -# Specify NET and RUN Name and model -#################################### +############################################ +# Set up model and cycle specific variables +############################################ export COMPONENT="atmos" -export finc=3 -export model=gfs +export model=${model:-gfs} export EXT="" -# For half-degree P Grib files -#export DO_HD_PGRB=YES +export SENDDBN=${SENDDBN:-NO} +export DBNROOT=${DBNROOT:-${UTILROOT}/fakedbn} ############################################## # Define COM directories @@ -24,9 +20,6 @@ YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_ATMOS_GOES GRID=0p25 YMD=${PDY} HH=${cyc} declare_from_tmpl -rx "COM_ATMOS_GEMPAK_0p25:COM_ATMOS_GEMPAK_TMPL" if [[ ! -d "${COM_ATMOS_GEMPAK_0p25}" ]]; then mkdir -m 775 -p "${COM_ATMOS_GEMPAK_0p25}"; fi -export SENDDBN="${SENDDBN:-NO}" -export DBNROOT="${DBNROOT:-${UTILROOT}/fakedbn}" - ################################################################# # Execute the script for the regular grib ################################################################# @@ -38,22 +31,12 @@ export DBN_ALERT_TYPE=GFS_GOESSIM_GEMPAK export RUN2=gfs_goessim export GRIB=goessimpgrb2.0p25.f export EXT="" -export fend=180 -if (( fend > FHMAX_GFS )); then - fend=${FHMAX_GFS} -fi -export finc=3 -export fstart=0 - -echo "RUNS the Program" -######################################################## -# Execute the script. 
-"${SCRgfs}/exgfs_atmos_goes_nawips.sh" +"${SCRgfs}/exgfs_atmos_goes_nawips.sh" "${FHR3}" ################################################################# # Execute the script for the 221 grib - +################################################################# export DATA_SPECIAL221="${DATA}/SPECIAL221" mkdir -p "${DATA_SPECIAL221}" cd "${DATA_SPECIAL221}" || exit 1 @@ -62,26 +45,12 @@ export DBN_ALERT_TYPE=GFS_GOESSIM221_GEMPAK export RUN2=gfs_goessim221 export GRIB=goessimpgrb2f export EXT=".grd221" -export fend=180 -if (( fend > FHMAX_GFS )); then - fend=${FHMAX_GFS} -fi -export finc=3 -export fstart=0 -echo "RUNS the Program" +"${SCRgfs}/exgfs_atmos_goes_nawips.sh" "${FHR3}" -######################################################## -# Execute the script. -"${SCRgfs}/exgfs_atmos_goes_nawips.sh" export err=$?; err_chk -######################################################## -echo "end of program" cd "${DATA}" || exit 1 -echo "######################################" -echo " SPECIAL.OUT " -echo "######################################" ############################################ # print exec I/O output @@ -96,4 +65,3 @@ fi if [[ "${KEEPDATA}" != "YES" ]] ; then rm -rf "${DATA}" fi - diff --git a/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_LETKF b/jobs/JGLOBAL_MARINE_ANALYSIS_LETKF similarity index 82% rename from jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_LETKF rename to jobs/JGLOBAL_MARINE_ANALYSIS_LETKF index d03ddfc19ac..38dc3049f90 100755 --- a/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_LETKF +++ b/jobs/JGLOBAL_MARINE_ANALYSIS_LETKF @@ -1,6 +1,6 @@ #!/bin/bash source "${HOMEgfs}/ush/preamble.sh" -source "${HOMEgfs}/ush/jjob_header.sh" -e "ocnanalletkf" -c "base ocnanal ocnanalletkf" +source "${HOMEgfs}/ush/jjob_header.sh" -e "marineanalletkf" -c "base ocnanal marineanalletkf" ############################################## # Set variables used in the script @@ -13,8 +13,10 @@ gPDY=${GDATE:0:8} gcyc=${GDATE:8:2} YMD=${gPDY} HH=${gcyc} declare_from_tmpl -rx \ - 
COM_OCEAN_HISTORY_PREV:COM_OCEAN_HISTORY_TMPL \ - COM_ICE_HISTORY_PREV:COM_ICE_HISTORY_TMPL + COMIN_OCEAN_HISTORY_PREV:COM_OCEAN_HISTORY_TMPL \ + COMIN_ICE_HISTORY_PREV:COM_ICE_HISTORY_TMPL + +YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COMIN_OBS:COM_OBS_TMPL ############################################## # Begin JOB SPECIFIC work diff --git a/jobs/rocoto/gempak.sh b/jobs/rocoto/gempak.sh index 82ea1175d87..f5aea2379dd 100755 --- a/jobs/rocoto/gempak.sh +++ b/jobs/rocoto/gempak.sh @@ -1,15 +1,14 @@ #! /usr/bin/env bash source "${HOMEgfs}/ush/preamble.sh" - -############################################################### -. "${HOMEgfs}/ush/load_fv3gfs_modules.sh" +source "${HOMEgfs}/ush/load_fv3gfs_modules.sh" status=$? if (( status != 0 )); then exit "${status}"; fi export job="gempak" export jobid="${job}.$$" + # Execute the JJOB "${HOMEgfs}/jobs/J${RUN^^}_ATMOS_GEMPAK" diff --git a/jobs/rocoto/gempakgrb2spec.sh b/jobs/rocoto/gempakgrb2spec.sh index f76c33ecdbe..ddcb84599e4 100755 --- a/jobs/rocoto/gempakgrb2spec.sh +++ b/jobs/rocoto/gempakgrb2spec.sh @@ -1,15 +1,14 @@ #! /usr/bin/env bash source "${HOMEgfs}/ush/preamble.sh" - -############################################################### -. "${HOMEgfs}/ush/load_fv3gfs_modules.sh" +source "${HOMEgfs}/ush/load_fv3gfs_modules.sh" status=$? if (( status != 0 )); then exit "${status}"; fi export job="gempakpgrb2spec" export jobid="${job}.$$" + # Execute the JJOB "${HOMEgfs}/jobs/JGFS_ATMOS_GEMPAK_PGRB2_SPEC" diff --git a/jobs/rocoto/ocnanalletkf.sh b/jobs/rocoto/marineanalletkf.sh similarity index 87% rename from jobs/rocoto/ocnanalletkf.sh rename to jobs/rocoto/marineanalletkf.sh index f710be57104..f2bfb9f70c3 100755 --- a/jobs/rocoto/ocnanalletkf.sh +++ b/jobs/rocoto/marineanalletkf.sh @@ -8,7 +8,7 @@ source "${HOMEgfs}/ush/preamble.sh" status=$? 
[[ ${status} -ne 0 ]] && exit "${status}" -export job="ocnanalletkf" +export job="marineanalletkf" export jobid="${job}.$$" ############################################################### @@ -18,6 +18,6 @@ export PYTHONPATH ############################################################### # Execute the JJOB -"${HOMEgfs}/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_LETKF" +"${HOMEgfs}/jobs/JGLOBAL_MARINE_ANALYSIS_LETKF" status=$? exit "${status}" diff --git a/modulefiles/module_base.hera.lua b/modulefiles/module_base.hera.lua index f75cf886e71..701cc4667dc 100644 --- a/modulefiles/module_base.hera.lua +++ b/modulefiles/module_base.hera.lua @@ -47,8 +47,7 @@ setenv("WGRIB2","wgrib2") setenv("WGRIB","wgrib") setenv("UTILROOT",(os.getenv("prod_util_ROOT") or "None")) ---prepend_path("MODULEPATH", pathJoin("/scratch1/NCEPDEV/global/glopara/git/prepobs/v" .. (os.getenv("prepobs_run_ver") or "None"), "modulefiles")) -prepend_path("MODULEPATH", pathJoin("/scratch1/NCEPDEV/global/glopara/git/prepobs/dev-gfsv17/modulefiles")) +prepend_path("MODULEPATH", pathJoin("/scratch1/NCEPDEV/global/glopara/git/prepobs/v" .. (os.getenv("prepobs_run_ver") or "None"), "modulefiles")) load(pathJoin("prepobs", (os.getenv("prepobs_run_ver") or "None"))) prepend_path("MODULEPATH", pathJoin("/scratch1/NCEPDEV/global/glopara/git/Fit2Obs/v" .. (os.getenv("fit2obs_ver") or "None"), "modulefiles")) diff --git a/modulefiles/module_base.hercules.lua b/modulefiles/module_base.hercules.lua index 998803f2469..fdc5f586986 100644 --- a/modulefiles/module_base.hercules.lua +++ b/modulefiles/module_base.hercules.lua @@ -43,10 +43,12 @@ setenv("WGRIB2","wgrib2") setenv("WGRIB","wgrib") setenv("UTILROOT",(os.getenv("prod_util_ROOT") or "None")) -prepend_path("MODULEPATH", pathJoin("/work/noaa/global/glopara/git/prepobs/feature-GFSv17_com_reorg_log_update/modulefiles")) +--prepend_path("MODULEPATH", pathJoin"/work/noaa/global/glopara/git/prepobs/v" .. 
(os.getenv("prepobs_run_ver") or "None"), "modulefiles") +prepend_path("MODULEPATH", pathJoin("/work/noaa/global/kfriedma/glopara/git/prepobs/v" .. (os.getenv("prepobs_run_ver") or "None"), "modulefiles")) load(pathJoin("prepobs", (os.getenv("prepobs_run_ver") or "None"))) -prepend_path("MODULEPATH", pathJoin("/work/noaa/global/glopara/git/Fit2Obs/v" .. (os.getenv("fit2obs_ver") or "None"), "modulefiles")) +--prepend_path("MODULEPATH", pathJoin("/work/noaa/global/glopara/git/Fit2Obs/v" .. (os.getenv("fit2obs_ver") or "None"), "modulefiles")) +prepend_path("MODULEPATH", pathJoin("/work/noaa/global/kfriedma/glopara/git/Fit2Obs/v" .. (os.getenv("fit2obs_ver") or "None"), "modulefiles")) load(pathJoin("fit2obs", (os.getenv("fit2obs_ver") or "None"))) whatis("Description: GFS run environment") diff --git a/modulefiles/module_base.jet.lua b/modulefiles/module_base.jet.lua index e53132fd6a9..76320688b04 100644 --- a/modulefiles/module_base.jet.lua +++ b/modulefiles/module_base.jet.lua @@ -49,8 +49,7 @@ setenv("WGRIB2","wgrib2") setenv("WGRIB","wgrib") setenv("UTILROOT",(os.getenv("prod_util_ROOT") or "None")) ---prepend_path("MODULEPATH", pathJoin("/lfs4/HFIP/hfv3gfs/glopara/git/prepobs/v" .. (os.getenv("prepobs_run_ver") or "None"), "modulefiles")) -prepend_path("MODULEPATH", pathJoin("/lfs4/HFIP/hfv3gfs/glopara/git/prepobs/feature-GFSv17_com_reorg_log_update/modulefiles")) +prepend_path("MODULEPATH", pathJoin("/lfs4/HFIP/hfv3gfs/glopara/git/prepobs/v" .. (os.getenv("prepobs_run_ver") or "None"), "modulefiles")) load(pathJoin("prepobs", (os.getenv("prepobs_run_ver") or "None"))) prepend_path("MODULEPATH", pathJoin("/lfs4/HFIP/hfv3gfs/glopara/git/Fit2Obs/v" .. 
(os.getenv("fit2obs_ver") or "None"), "modulefiles")) diff --git a/modulefiles/module_base.noaacloud.lua b/modulefiles/module_base.noaacloud.lua index fb5b283087a..113409e41d9 100644 --- a/modulefiles/module_base.noaacloud.lua +++ b/modulefiles/module_base.noaacloud.lua @@ -9,9 +9,6 @@ load(pathJoin("stack-intel", (os.getenv("stack_intel_ver") or "None"))) load(pathJoin("stack-intel-oneapi-mpi", (os.getenv("stack_impi_ver") or "None"))) load(pathJoin("python", (os.getenv("python_ver") or "None"))) ---load(pathJoin("hpss", (os.getenv("hpss_ver") or "None"))) -load(pathJoin("gempak", (os.getenv("gempak_ver") or "None"))) -load(pathJoin("ncl", (os.getenv("ncl_ver") or "None"))) load(pathJoin("jasper", (os.getenv("jasper_ver") or "None"))) load(pathJoin("libpng", (os.getenv("libpng_ver") or "None"))) load(pathJoin("cdo", (os.getenv("cdo_ver") or "None"))) diff --git a/modulefiles/module_base.orion.lua b/modulefiles/module_base.orion.lua index 4d747512dba..5cee9e5e314 100644 --- a/modulefiles/module_base.orion.lua +++ b/modulefiles/module_base.orion.lua @@ -9,8 +9,6 @@ load(pathJoin("stack-intel", (os.getenv("stack_intel_ver") or "None"))) load(pathJoin("stack-intel-oneapi-mpi", (os.getenv("stack_impi_ver") or "None"))) load(pathJoin("python", (os.getenv("python_ver") or "None"))) -load(pathJoin("gempak", (os.getenv("gempak_ver") or "None"))) -load(pathJoin("ncl", (os.getenv("ncl_ver") or "None"))) load(pathJoin("jasper", (os.getenv("jasper_ver") or "None"))) load(pathJoin("libpng", (os.getenv("libpng_ver") or "None"))) load(pathJoin("cdo", (os.getenv("cdo_ver") or "None"))) @@ -44,11 +42,12 @@ setenv("WGRIB2","wgrib2") setenv("WGRIB","wgrib") setenv("UTILROOT",(os.getenv("prod_util_ROOT") or "None")) ---prepend_path("MODULEPATH", pathJoin("/work/noaa/global/glopara/git/prepobs/v" .. 
(os.getenv("prepobs_run_ver") or "None"), "modulefiles")) -prepend_path("MODULEPATH", pathJoin("/work/noaa/global/glopara/git/prepobs/feature-GFSv17_com_reorg_log_update/modulefiles")) +--prepend_path("MODULEPATH", pathJoin"/work/noaa/global/glopara/git/prepobs/v" .. (os.getenv("prepobs_run_ver") or "None"), "modulefiles") +prepend_path("MODULEPATH", pathJoin("/work/noaa/global/kfriedma/glopara/git/prepobs/v" .. (os.getenv("prepobs_run_ver") or "None"), "modulefiles")) load(pathJoin("prepobs", (os.getenv("prepobs_run_ver") or "None"))) -prepend_path("MODULEPATH", pathJoin("/work/noaa/global/glopara/git/Fit2Obs/v" .. (os.getenv("fit2obs_ver") or "None"), "modulefiles")) +--prepend_path("MODULEPATH", pathJoin("/work/noaa/global/glopara/git/Fit2Obs/v" .. (os.getenv("fit2obs_ver") or "None"), "modulefiles")) +prepend_path("MODULEPATH", pathJoin("/work/noaa/global/kfriedma/glopara/git/Fit2Obs/v" .. (os.getenv("fit2obs_ver") or "None"), "modulefiles")) load(pathJoin("fit2obs", (os.getenv("fit2obs_ver") or "None"))) whatis("Description: GFS run environment") diff --git a/modulefiles/module_base.wcoss2.lua b/modulefiles/module_base.wcoss2.lua index 43b21ccc252..49d5abc6788 100644 --- a/modulefiles/module_base.wcoss2.lua +++ b/modulefiles/module_base.wcoss2.lua @@ -36,8 +36,7 @@ setenv("HPC_OPT", "/apps/ops/para/libs") load(pathJoin("met", (os.getenv("met_ver") or "None"))) load(pathJoin("metplus", (os.getenv("metplus_ver") or "None"))) ---prepend_path("MODULEPATH", pathJoin("/lfs/h2/emc/global/save/emc.global/git/prepobs/v" .. (os.getenv("prepobs_run_ver") or "None"), "modulefiles")) -prepend_path("MODULEPATH", pathJoin("/lfs/h2/emc/global/save/emc.global/git/prepobs/feature-GFSv17_com_reorg_log_update/modulefiles")) +prepend_path("MODULEPATH", pathJoin("/lfs/h2/emc/global/save/emc.global/git/prepobs/v" .. 
(os.getenv("prepobs_run_ver") or "None"), "modulefiles")) load(pathJoin("prepobs", (os.getenv("prepobs_run_ver") or "None"))) prepend_path("MODULEPATH", pathJoin("/lfs/h2/emc/global/save/emc.global/git/Fit2Obs/v" .. (os.getenv("fit2obs_ver") or "None"), "modulefiles")) diff --git a/modulefiles/module_gwci.orion.lua b/modulefiles/module_gwci.orion.lua index cef7acf308f..e2213932d75 100644 --- a/modulefiles/module_gwci.orion.lua +++ b/modulefiles/module_gwci.orion.lua @@ -2,10 +2,10 @@ help([[ Load environment to run GFS workflow ci scripts on Orion ]]) -prepend_path("MODULEPATH", "/work/noaa/epic/role-epic/spack-stack/orion/spack-stack-1.6.0/envs/gsi-addon-env/install/modulefiles/Core") +prepend_path("MODULEPATH", "/work/noaa/epic/role-epic/spack-stack/orion/spack-stack-1.6.0/envs/gsi-addon-env-rocky9/install/modulefiles/Core") -load(pathJoin("stack-intel", os.getenv("2022.0.2"))) -load(pathJoin("stack-intel-oneapi-mpi", os.getenv("2021.5.1"))) +load(pathJoin("stack-intel", os.getenv("2021.9.0"))) +load(pathJoin("stack-intel-oneapi-mpi", os.getenv("2021.9.0"))) load(pathJoin("netcdf-c", os.getenv("4.9.2"))) load(pathJoin("netcdf-fortran", os.getenv("4.6.1"))) diff --git a/modulefiles/module_gwsetup.orion.lua b/modulefiles/module_gwsetup.orion.lua index 37cb5116598..b8e2fc8a9fe 100644 --- a/modulefiles/module_gwsetup.orion.lua +++ b/modulefiles/module_gwsetup.orion.lua @@ -4,11 +4,11 @@ Load environment to run GFS workflow ci scripts on Orion prepend_path("MODULEPATH", "/apps/modulefiles/core") load(pathJoin("contrib","0.1")) -load(pathJoin("rocoto","1.3.3")) +load(pathJoin("rocoto","1.3.7")) -prepend_path("MODULEPATH", "/work/noaa/epic/role-epic/spack-stack/orion/spack-stack-1.6.0/envs/gsi-addon-env/install/modulefiles/Core") +prepend_path("MODULEPATH", "/work/noaa/epic/role-epic/spack-stack/orion/spack-stack-1.6.0/envs/gsi-addon-env-rocky9/install/modulefiles/Core") -local stack_intel_ver=os.getenv("stack_intel_ver") or "2022.0.2" +local 
stack_intel_ver=os.getenv("stack_intel_ver") or "2021.9.0" local python_ver=os.getenv("python_ver") or "3.11.6" load(pathJoin("stack-intel", stack_intel_ver)) diff --git a/parm/archive/master_enkf.yaml.j2 b/parm/archive/master_enkf.yaml.j2 index f663d02895e..70f8a2ad89b 100644 --- a/parm/archive/master_enkf.yaml.j2 +++ b/parm/archive/master_enkf.yaml.j2 @@ -6,108 +6,111 @@ # Split IAUFHRS into a list; typically either "3,6,9" or 6 (integer) {% if IAUFHRS is string %} -# "3,6,9" -{% set iaufhrs = [] %} -{% for iaufhr in IAUFHRS.split(",") %} -{% do iaufhrs.append(iaufhr | int) %} -{% endfor %} + # "3,6,9" + {% set iaufhrs = [] %} + {% for iaufhr in IAUFHRS.split(",") %} + {% do iaufhrs.append(iaufhr | int) %} + {% endfor %} {% else %} -# 6 (integer) -{% set iaufhrs = [IAUFHRS] %} + # 6 (integer) + {% set iaufhrs = [IAUFHRS] %} {% endif %} # Repeat for IAUFHRS_ENKF {% if IAUFHRS_ENKF is string %} -{% set iaufhrs_enkf = [] %} -{% for iaufhr in IAUFHRS_ENKF.split(",") %} -{% do iaufhrs_enkf.append(iaufhr | int) %} -{% endfor %} + {% set iaufhrs_enkf = [] %} + {% for iaufhr in IAUFHRS_ENKF.split(",") %} + {% do iaufhrs_enkf.append(iaufhr | int) %} + {% endfor %} {% else %} -{% set iaufhrs_enkf = [IAUFHRS_ENKF] %} + {% set iaufhrs_enkf = [IAUFHRS_ENKF] %} {% endif %} # Determine which data to archive datasets: {% if ENSGRP == 0 %} -{% filter indent(width=4) %} -# Archive the ensemble means and spreads + {% filter indent(width=4) %} + # Archive the ensemble means and spreads {% include "enkf.yaml.j2" %} -{% endfilter %} + {% endfilter %} {% else %} -# Archive individual member data -# First, construct individual member directories from templates -# COMIN_ATMOS_ANALYSIS_MEM, COMIN_ATMOS_HISTORY_MEM, and COMIN_ATMOS_RESTART_MEM - -# Declare to-be-filled lists of member COM directories -{% set COMIN_ATMOS_ANALYSIS_MEM_list = [] %} -{% set COMIN_ATMOS_RESTART_MEM_list = [] %} -{% set COMIN_ATMOS_HISTORY_MEM_list = [] %} - -# Determine which ensemble members belong to this 
group -{% set first_group_mem = (ENSGRP - 1) * NMEM_EARCGRP + 1 %} -{% set last_group_mem = [ ENSGRP * NMEM_EARCGRP, nmem_ens ] | min %} - -# Construct member COM directories for the group -{% for mem in range(first_group_mem, last_group_mem + 1) %} - -# Declare a dict of search and replace terms to run on each template -{% set tmpl_dict = {'ROTDIR':ROTDIR, - 'RUN':RUN, - 'YMD':cycle_YMD, - 'HH':cycle_HH, - 'MEMDIR':"mem" + '%03d' % mem} %} - -# Replace template variables with tmpl_dict, one key at a time -# This must be done in a namespace to overcome jinja scoping -# Variables set inside of a for loop are lost at the end of the loop -# unless they are part of a namespace -{% set com_ns = namespace(COMIN_ATMOS_ANALYSIS_MEM = COM_ATMOS_ANALYSIS_TMPL, - COMIN_ATMOS_HISTORY_MEM = COM_ATMOS_HISTORY_TMPL, - COMIN_ATMOS_RESTART_MEM = COM_ATMOS_RESTART_TMPL) %} - -{% for key in tmpl_dict.keys() %} -{% set search_term = '${' + key + '}' %} -{% set replace_term = tmpl_dict[key] %} -{% set com_ns.COMIN_ATMOS_ANALYSIS_MEM = com_ns.COMIN_ATMOS_ANALYSIS_MEM.replace(search_term, replace_term) %} -{% set com_ns.COMIN_ATMOS_HISTORY_MEM = com_ns.COMIN_ATMOS_HISTORY_MEM.replace(search_term, replace_term) %} -{% set com_ns.COMIN_ATMOS_RESTART_MEM = com_ns.COMIN_ATMOS_RESTART_MEM.replace(search_term, replace_term) %} -{% endfor %} - -# Append the member COM directories -{% do COMIN_ATMOS_ANALYSIS_MEM_list.append(com_ns.COMIN_ATMOS_ANALYSIS_MEM)%} -{% do COMIN_ATMOS_HISTORY_MEM_list.append(com_ns.COMIN_ATMOS_HISTORY_MEM)%} -{% do COMIN_ATMOS_RESTART_MEM_list.append(com_ns.COMIN_ATMOS_RESTART_MEM)%} - -{% endfor %} - -# Archive member data -{% filter indent(width=4) %} + # Archive individual member data + # First, construct individual member directories from templates + # COMIN_ATMOS_ANALYSIS_MEM, COMIN_ATMOS_HISTORY_MEM, and COMIN_ATMOS_RESTART_MEM + + # Declare to-be-filled lists of member COM directories + {% set COMIN_ATMOS_ANALYSIS_MEM_list = [] %} + {% set 
COMIN_ATMOS_RESTART_MEM_list = [] %} + {% set COMIN_ATMOS_HISTORY_MEM_list = [] %} + + # Determine which ensemble members belong to this group + {% set first_group_mem = (ENSGRP - 1) * NMEM_EARCGRP + 1 %} + {% set last_group_mem = [ ENSGRP * NMEM_EARCGRP, nmem_ens ] | min %} + + # Construct member COM directories for the group + {% for mem in range(first_group_mem, last_group_mem + 1) %} + + # Declare a dict of search and replace terms to run on each template + {% set tmpl_dict = {'ROTDIR':ROTDIR, + 'RUN':RUN, + 'YMD':cycle_YMD, + 'HH':cycle_HH, + 'MEMDIR':"mem" + '%03d' % mem} %} + + # Replace template variables with tmpl_dict, one key at a time + # This must be done in a namespace to overcome jinja scoping + # Variables set inside of a for loop are lost at the end of the loop + # unless they are part of a namespace + {% set com_ns = namespace(COMIN_ATMOS_ANALYSIS_MEM = COM_ATMOS_ANALYSIS_TMPL, + COMIN_ATMOS_HISTORY_MEM = COM_ATMOS_HISTORY_TMPL, + COMIN_ATMOS_RESTART_MEM = COM_ATMOS_RESTART_TMPL) %} + + {% for key in tmpl_dict.keys() %} + {% set search_term = '${' + key + '}' %} + {% set replace_term = tmpl_dict[key] %} + {% set com_ns.COMIN_ATMOS_ANALYSIS_MEM = + com_ns.COMIN_ATMOS_ANALYSIS_MEM.replace(search_term, replace_term) %} + {% set com_ns.COMIN_ATMOS_HISTORY_MEM = + com_ns.COMIN_ATMOS_HISTORY_MEM.replace(search_term, replace_term) %} + {% set com_ns.COMIN_ATMOS_RESTART_MEM = + com_ns.COMIN_ATMOS_RESTART_MEM.replace(search_term, replace_term) %} + {% endfor %} + + # Append the member COM directories + {% do COMIN_ATMOS_ANALYSIS_MEM_list.append(com_ns.COMIN_ATMOS_ANALYSIS_MEM)%} + {% do COMIN_ATMOS_HISTORY_MEM_list.append(com_ns.COMIN_ATMOS_HISTORY_MEM)%} + {% do COMIN_ATMOS_RESTART_MEM_list.append(com_ns.COMIN_ATMOS_RESTART_MEM)%} + + {% endfor %} + + # Archive member data + {% filter indent(width=4) %} {% include "enkf_grp.yaml.j2" %} -{% endfilter %} + {% endfilter %} -# Determine if restarts should be saved -{% set save_warm_start_forecast, 
save_warm_start_cycled = ( False, False ) %} + # Determine if restarts should be saved + {% set save_warm_start_forecast, save_warm_start_cycled = ( False, False ) %} -# Save the increments and restarts every ARCH_WARMICFREQ days -# The ensemble increments (group a) should be saved on the ARCH_CYC -{% if (current_cycle - SDATE).days % ARCH_WARMICFREQ == 0 %} -{% if ARCH_CYC == cycle_HH | int %} -{% filter indent(width=4) %} + # Save the increments and restarts every ARCH_WARMICFREQ days + # The ensemble increments (group a) should be saved on the ARCH_CYC + {% if (current_cycle - SDATE).days % ARCH_WARMICFREQ == 0 %} + {% if ARCH_CYC == cycle_HH | int %} + {% filter indent(width=4) %} {% include "enkf_restarta_grp.yaml.j2" %} -{% endfilter %} -{% endif %} -{% endif %} - -# The ensemble ICs (group b) are restarts and always lag increments by assim_freq -{% set ics_offset = (assim_freq | string + "H") | to_timedelta %} -{% if (current_cycle | add_to_datetime(ics_offset) - SDATE).days % ARCH_WARMICFREQ == 0 %} -{% if (ARCH_CYC - assim_freq) % 24 == cycle_HH | int %} -{% filter indent(width=4) %} + {% endfilter %} + {% endif %} + {% endif %} + + # The ensemble ICs (group b) are restarts and always lag increments by assim_freq + {% set ics_offset = (assim_freq | string + "H") | to_timedelta %} + {% if (current_cycle | add_to_datetime(ics_offset) - SDATE).days % ARCH_WARMICFREQ == 0 %} + {% if (ARCH_CYC - assim_freq) % 24 == cycle_HH | int %} + {% filter indent(width=4) %} {% include "enkf_restartb_grp.yaml.j2" %} -{% endfilter %} -{% endif %} -{% endif %} + {% endfilter %} + {% endif %} + {% endif %} -# End of individual member archiving + # End of individual member archiving {% endif %} diff --git a/parm/archive/master_gdas.yaml.j2 b/parm/archive/master_gdas.yaml.j2 index f25fd9de403..30a2175653c 100644 --- a/parm/archive/master_gdas.yaml.j2 +++ b/parm/archive/master_gdas.yaml.j2 @@ -5,12 +5,12 @@ # Split IAUFHRS into a list; typically either "3,6,9" or 6 (integer) {% 
if IAUFHRS is string %} -{% set iaufhrs = [] %} -{% for iaufhr in IAUFHRS.split(",") %} -{% do iaufhrs.append(iaufhr | int) %} -{% endfor %} + {% set iaufhrs = [] %} + {% for iaufhr in IAUFHRS.split(",") %} + {% do iaufhrs.append(iaufhr | int) %} + {% endfor %} {% else %} -{% set iaufhrs = [IAUFHRS] %} + {% set iaufhrs = [IAUFHRS] %} {% endif %} datasets: @@ -20,84 +20,90 @@ datasets: {% endfilter %} {% if DO_ICE %} -# Ice data -{% filter indent(width=4) %} + # Ice data + {% filter indent(width=4) %} {% include "gdasice.yaml.j2" %} -{% endfilter %} + {% endfilter %} {% endif %} {% if DO_OCN %} -# Ocean forecast products -{% filter indent(width=4) %} + # Ocean forecast products + {% filter indent(width=4) %} {% include "gdasocean.yaml.j2" %} -{% endfilter %} -{% if DO_JEDIOCNVAR and MODE == "cycled" %} -# Ocean analysis products -{% filter indent(width=4) %} + {% endfilter %} + {% if DO_JEDIOCNVAR and MODE == "cycled" %} + # Ocean analysis products + {% filter indent(width=4) %} {% include "gdasocean_analysis.yaml.j2" %} -{% endfilter %} -{% endif %} + {% endfilter %} + {% endif %} {% endif %} {% if DO_WAVE %} -# Wave products -{% filter indent(width=4) %} + # Wave products + {% filter indent(width=4) %} {% include "gdaswave.yaml.j2" %} -{% endfilter %} + {% endfilter %} {% endif %} {% if MODE == "cycled" %} -# Determine if we will save restart ICs or not (only valid for cycled) -{% set save_warm_start_forecast, save_warm_start_cycled = ( False, False ) %} + # Determine if we will save restart ICs or not (only valid for cycled) + {% set save_warm_start_forecast, save_warm_start_cycled = ( False, False ) %} -{% if ARCH_CYC == cycle_HH | int%} -# Save the warm and forecast-only cycle ICs every ARCH_WARMICFREQ days -{% if (current_cycle - SDATE).days % ARCH_WARMICFREQ == 0 %} -{% set save_warm_start_forecast = True %} -{% set save_warm_start_cycled = True %} -# Save the forecast-only restarts every ARCH_FCSTICFREQ days -{% elif (current_cycle - SDATE).days % 
ARCH_FCSTICFREQ == 0 %} -{% set save_warm_start_forecast = True %} -{% endif %} -{% endif %} + {% if ARCH_CYC == cycle_HH | int%} + # Save the forecast-only cycle ICs every ARCH_WARMICFREQ or ARCH_FCSTICFREQ days + {% if (current_cycle - SDATE).days % ARCH_WARMICFREQ == 0 %} + {% set save_warm_start_forecast = True %} + {% elif (current_cycle - SDATE).days % ARCH_FCSTICFREQ == 0 %} + {% set save_warm_start_forecast = True %} + {% endif %} + {% endif %} -{% if save_warm_start_forecast %} -# Save warm start forecast-only data -# Atmosphere restarts -{% filter indent(width=4) %} + # The GDAS ICs (group b) are restarts and always lag increments by assim_freq + {% if (ARCH_CYC - assim_freq) % 24 == cycle_HH | int %} + {% set ics_offset = (assim_freq | string + "H") | to_timedelta %} + {% if (current_cycle | add_to_datetime(ics_offset) - SDATE).days % ARCH_WARMICFREQ == 0 %} + {% set save_warm_start_cycled = True %} + {% endif %} + {% endif %} + + {% if save_warm_start_forecast %} + # Save warm start forecast-only data + # Atmosphere restarts + {% filter indent(width=4) %} {% include "gdas_restarta.yaml.j2" %} -{% endfilter %} + {% endfilter %} -{% if DO_WAVE %} -# Wave restarts -{% filter indent(width=4) %} + {% if DO_WAVE %} + # Wave restarts + {% filter indent(width=4) %} {% include "gdaswave_restart.yaml.j2" %} -{% endfilter %} -{% endif %} + {% endfilter %} + {% endif %} -{% if DO_OCN %} -# Ocean restarts -{% filter indent(width=4) %} + {% if DO_OCN %} + # Ocean restarts + {% filter indent(width=4) %} {% include "gdasocean_restart.yaml.j2" %} -{% endfilter %} -{% endif %} + {% endfilter %} + {% endif %} -{% if DO_ICE %} -# Ice restarts -{% filter indent(width=4) %} + {% if DO_ICE %} + # Ice restarts + {% filter indent(width=4) %} {% include "gdasice_restart.yaml.j2" %} -{% endfilter %} -{% endif %} + {% endfilter %} + {% endif %} -# End of forecast-only restarts -{% endif %} + # End of forecast-only restarts + {% endif %} -{% if save_warm_start_cycled %} -# Save 
warm start cycled restarts -{% filter indent(width=4) %} + {% if save_warm_start_cycled %} + # Save warm start cycled restarts + {% filter indent(width=4) %} {% include "gdas_restartb.yaml.j2" %} -{% endfilter %} -{% endif %} + {% endfilter %} + {% endif %} -# End of restart checking + # End of restart checking {% endif %} diff --git a/parm/archive/master_gfs.yaml.j2 b/parm/archive/master_gfs.yaml.j2 index 67cde482a23..14178f3e7e0 100644 --- a/parm/archive/master_gfs.yaml.j2 +++ b/parm/archive/master_gfs.yaml.j2 @@ -5,14 +5,14 @@ # Split IAUFHRS into a list; typically either "3,6,9" or 6 (integer) {% if IAUFHRS is string %} -# "3,6,9" -{% set iaufhrs = [] %} -{% for iaufhr in IAUFHRS.split(",") %} -{% do iaufhrs.append(iaufhr | int) %} -{% endfor %} + # "3,6,9" + {% set iaufhrs = [] %} + {% for iaufhr in IAUFHRS.split(",") %} + {% do iaufhrs.append(iaufhr | int) %} + {% endfor %} {% else %} -# 6 (integer) -{% set iaufhrs = [IAUFHRS] %} + # 6 (integer) + {% set iaufhrs = [IAUFHRS] %} {% endif %} # Determine which data to archive @@ -24,89 +24,89 @@ datasets: {% endfilter %} {% if ARCH_GAUSSIAN %} -# Archive Gaussian data -{% filter indent(width=4) %} + # Archive Gaussian data + {% filter indent(width=4) %} {% include "gfs_flux.yaml.j2" %} {% include "gfs_netcdfb.yaml.j2" %} {% include "gfs_pgrb2b.yaml.j2" %} -{% endfilter %} -{% if MODE == "cycled" %} -# Archive Gaussian analysis data -{% filter indent(width=4) %} + {% endfilter %} + {% if MODE == "cycled" %} + # Archive Gaussian analysis data + {% filter indent(width=4) %} {% include "gfs_netcdfa.yaml.j2" %} -{% endfilter %} -{% endif %} + {% endfilter %} + {% endif %} {% endif %} {% if DO_WAVE %} -# Wave forecasts -{% filter indent(width=4) %} + # Wave forecasts + {% filter indent(width=4) %} {% include "gfswave.yaml.j2" %} -{% endfilter %} + {% endfilter %} {% endif %} {% if AERO_FCST_CDUMP == "gfs" or AERO_FCST_CDUMP == "both" %} -# Aerosol forecasts -{% filter indent(width=4) %} + # Aerosol forecasts + {% 
filter indent(width=4) %} {% include "chem.yaml.j2" %} -{% endfilter %} + {% endfilter %} {% endif %} {% if DO_OCN %} -# Ocean forecasts -{% filter indent(width=4) %} + # Ocean forecasts + {% filter indent(width=4) %} {% include "ocean_6hravg.yaml.j2" %} {% include "ocean_grib2.yaml.j2" %} {% include "gfs_flux_1p00.yaml.j2" %} -{% endfilter %} + {% endfilter %} {% endif %} {% if DO_ICE %} -# Ice forecasts -{% filter indent(width=4) %} + # Ice forecasts + {% filter indent(width=4) %} {% include "ice_6hravg.yaml.j2" %} {% include "ice_grib2.yaml.j2" %} -{% endfilter %} + {% endfilter %} {% endif %} {% if DO_BUFRSND %} -# Downstream BUFR soundings -{% filter indent(width=4) %} + # Downstream BUFR soundings + {% filter indent(width=4) %} {% include "gfs_downstream.yaml.j2" %} -{% endfilter %} + {% endfilter %} {% endif %} # Determine whether to save the MOS tarball {% if DO_MOS and cycle_HH == "18" %} -{% if not REALTIME %} -{% filter indent(width=4) %} + {% if not REALTIME %} + {% filter indent(width=4) %} {% include "gfsmos.yaml.j2" %} -{% endfilter %} + {% endfilter %} -{% else %} + {% else %} -{% set td_from_sdate = current_cycle - SDATE %} -{% set td_one_day = "+1D" | to_timedelta %} -{% if td_from_sdate > td_one_day %} -{% filter indent(width=4) %} + {% set td_from_sdate = current_cycle - SDATE %} + {% set td_one_day = "+1D" | to_timedelta %} + {% if td_from_sdate > td_one_day %} + {% filter indent(width=4) %} {% include "gfsmos.yaml.j2" %} -{% endfilter %} -{% endif %} + {% endfilter %} + {% endif %} -{% endif %} + {% endif %} {% endif %} # Determine if we will save restart ICs or not {% if ARCH_CYC == cycle_HH | int %} -# Save the forecast-only cycle ICs every ARCH_WARMICFREQ or ARCH_FCSTICFREQ days -{% if (current_cycle - SDATE).days % ARCH_WARMICFREQ == 0 %} -{% filter indent(width=4) %} + # Save the forecast-only cycle ICs every ARCH_WARMICFREQ or ARCH_FCSTICFREQ days + {% if (current_cycle - SDATE).days % ARCH_WARMICFREQ == 0 %} + {% filter indent(width=4) 
%} {% include "gfs_restarta.yaml.j2" %} -{% endfilter %} -{% elif (current_cycle - SDATE).days % ARCH_FCSTICFREQ == 0 %} -{% filter indent(width=4) %} + {% endfilter %} + {% elif (current_cycle - SDATE).days % ARCH_FCSTICFREQ == 0 %} + {% filter indent(width=4) %} {% include "gfs_restarta.yaml.j2" %} -{% endfilter %} -{% endif %} + {% endfilter %} + {% endif %} {% endif %} diff --git a/parm/config/gfs/config.base b/parm/config/gfs/config.base index f00a91302a3..ce14ebefa11 100644 --- a/parm/config/gfs/config.base +++ b/parm/config/gfs/config.base @@ -313,6 +313,13 @@ if (( FHMAX_HF_GFS < 120 )); then export ILPOST=${FHOUT_GFS} fi +# Limit bounds of goes processing +export FHMAX_GOES=180 +export FHOUT_GOES=3 +if (( FHMAX_GOES > FHMAX_GFS )); then + export FHMAX_GOES=${FHMAX_GFS} +fi + # GFS restart interval in hours export restart_interval_gfs=12 # NOTE: Do not set this to zero. Instead set it to $FHMAX_GFS @@ -478,8 +485,9 @@ export OFFSET_START_HOUR=0 # Number of regional collectives to create soundings for export NUM_SND_COLLECTIVES=${NUM_SND_COLLECTIVES:-9} -echo "machine: ${machine}" -if [ "${machine}" == "noaacloud" ]; then +# The tracker, genesis, and METplus jobs are not supported on AWS yet +# TODO: we should place these in workflow/hosts/awspw.yaml as part of AWS setup, not for general. +if [[ "${machine}" == "AWSPW" ]]; then export DO_TRACKER="NO" export DO_GENESIS="NO" export DO_METP="NO" diff --git a/parm/config/gfs/config.marineanalletkf b/parm/config/gfs/config.marineanalletkf new file mode 100644 index 00000000000..fde3433a13d --- /dev/null +++ b/parm/config/gfs/config.marineanalletkf @@ -0,0 +1,18 @@ +#!/bin/bash + +########## config.marineanalletkf ########## +# Ocn Analysis specific + +echo "BEGIN: config.marineanalletkf" + +# Get task specific resources +. 
"${EXPDIR}/config.resources" marineanalletkf + +export MARINE_LETKF_EXEC="${JEDI_BIN}/gdas.x" +export MARINE_LETKF_YAML_TMPL="${PARMgfs}/gdas/soca/letkf/letkf.yaml.j2" +export MARINE_LETKF_STAGE_YAML_TMPL="${PARMgfs}/gdas/soca/letkf/letkf_stage.yaml.j2" + +export GRIDGEN_EXEC="${JEDI_BIN}/gdas_soca_gridgen.x" +export GRIDGEN_YAML="${PARMgfs}/gdas/soca/gridgen/gridgen.yaml" + +echo "END: config.marineanalletkf" diff --git a/parm/config/gfs/config.ocnanal b/parm/config/gfs/config.ocnanal index 38a6cbd52a6..367e570ec8f 100644 --- a/parm/config/gfs/config.ocnanal +++ b/parm/config/gfs/config.ocnanal @@ -16,8 +16,8 @@ export SOCA_NINNER=@SOCA_NINNER@ export CASE_ANL=@CASE_ANL@ export DOMAIN_STACK_SIZE=116640000 #TODO: Make the stack size resolution dependent export JEDI_BIN=${HOMEgfs}/sorc/gdas.cd/build/bin - -export COMIN_OBS=@COMIN_OBS@ +export SOCA_FIX_STAGE_YAML_TMPL="${PARMgfs}/gdas/soca/soca_fix_stage.yaml.j2" +export SOCA_ENS_BKG_STAGE_YAML_TMPL="${PARMgfs}/gdas/soca/soca_ens_bkg_stage.yaml.j2" # NICAS export NICAS_RESOL=@NICAS_RESOL@ diff --git a/parm/config/gfs/config.ocnanalletkf b/parm/config/gfs/config.ocnanalletkf deleted file mode 100644 index b67f37152e7..00000000000 --- a/parm/config/gfs/config.ocnanalletkf +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -########## config.ocnanalletkf ########## -# Ocn Analysis specific - -echo "BEGIN: config.ocnanalletkf" - -# Get task specific resources -. 
"${EXPDIR}/config.resources" ocnanalletkf - -echo "END: config.ocnanalletkf" diff --git a/parm/config/gfs/config.resources b/parm/config/gfs/config.resources index b333ccbc85a..3ccb84ad598 100644 --- a/parm/config/gfs/config.resources +++ b/parm/config/gfs/config.resources @@ -25,7 +25,7 @@ if (( $# != 1 )); then echo "waveinit waveprep wavepostsbs wavepostbndpnt wavepostbndpntbll wavepostpnt" echo "wavegempak waveawipsbulls waveawipsgridded" echo "postsnd awips gempak npoess" - echo "ocnanalprep prepoceanobs ocnanalbmat ocnanalrun ocnanalecen ocnanalletkf ocnanalchkpt ocnanalpost ocnanalvrfy" + echo "ocnanalprep prepoceanobs ocnanalbmat ocnanalrun ocnanalecen marineanalletkf ocnanalchkpt ocnanalpost ocnanalvrfy" exit 1 fi @@ -559,32 +559,32 @@ case ${step} in export memory_ocnanalecen ;; - "ocnanalletkf") + "marineanalletkf") npes=16 case ${OCNRES} in "025") npes=480 - memory_ocnanalletkf="96GB" + memory_marineanalletkf="96GB" ;; "050") npes=16 - memory_ocnanalletkf="96GB" + memory_marineanalletkf="96GB" ;; "500") npes=16 - memory_ocnanalletkf="24GB" + memory_marineanalletkf="24GB" ;; *) echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${OCNRES}" exit 4 esac - export wtime_ocnanalletkf="00:10:00" - export npe_ocnanalletkf=${npes} - export nth_ocnanalletkf=1 + export wtime_marineanalletkf="00:10:00" + export npe_marineanalletkf=${npes} + export nth_marineanalletkf=1 export is_exclusive=True - export npe_node_ocnanalletkf=$(( npe_node_max / nth_ocnanalletkf )) - export memory_ocnanalletkf + export npe_node_marineanalletkf=$(( npe_node_max / nth_marineanalletkf )) + export memory_marineanalletkf ;; @@ -1219,7 +1219,7 @@ case ${step} in ;; "gempak") - export wtime_gempak="03:00:00" + export wtime_gempak="00:30:00" export npe_gempak_gdas=2 export npe_gempak_gfs=28 export npe_node_gempak_gdas=2 diff --git a/parm/config/gfs/config.resources.ORION b/parm/config/gfs/config.resources.ORION new file mode 100644 index 00000000000..e3e81b01825 --- 
/dev/null +++ b/parm/config/gfs/config.resources.ORION @@ -0,0 +1,17 @@ +#! /usr/bin/env bash + +# Orion-specific job resources + +case ${step} in + "anal") + # TODO: + # On Orion, after Rocky 9 upgrade, GSI performance is degraded. + # Remove this block once GSI issue is resolved + # https://github.com/NOAA-EMC/GSI/pull/764 + # https://github.com/JCSDA/spack-stack/issues/1166 + export wtime_anal_gdas="02:40:00" + export wtime_anal_gfs="02:00:00" + ;; + *) + ;; +esac diff --git a/scripts/exgdas_atmos_nawips.sh b/scripts/exgdas_atmos_nawips.sh index ea350239c1b..7feb3210acb 100755 --- a/scripts/exgdas_atmos_nawips.sh +++ b/scripts/exgdas_atmos_nawips.sh @@ -1,20 +1,14 @@ #! /usr/bin/env bash ################################################################### -# echo "----------------------------------------------------" # echo "exnawips - convert NCEP GRIB files into GEMPAK Grids" -# echo "----------------------------------------------------" -# echo "History: Mar 2000 - First implementation of this new script." -# echo "S Lilly: May 2008 - add logic to make sure that all of the " -# echo " data produced from the restricted ECMWF" -# echo " data on the CCS is properly protected." 
-##################################################################### +################################################################### source "${USHgfs}/preamble.sh" "${2}" cd "${DATA}" || exit 1 grid=$1 -fend=$2 +fhr3=$2 DBN_ALERT_TYPE=$3 destination=$4 @@ -22,6 +16,9 @@ DATA_RUN="${DATA}/${grid}" mkdir -p "${DATA_RUN}" cd "${DATA_RUN}" || exit 1 +# "Import" functions used in this script +source "${USHgfs}/product_functions.sh" + for table in g2varswmo2.tbl g2vcrdwmo2.tbl g2varsncep1.tbl g2vcrdncep1.tbl; do cp "${HOMEgfs}/gempak/fix/${table}" "${table}" || \ ( echo "FATAL ERROR: ${table} is missing" && exit 2 ) @@ -39,71 +36,50 @@ proj= output=T pdsext=no -sleep_interval=10 -max_tries=180 - -fhr=$(( 10#${fstart} )) -while (( fhr <= 10#${fend} )); do - fhr3=$(printf "%03d" "${fhr}") - - source_dirvar="COM_ATMOS_GRIB_${grid}" - GEMGRD="${RUN}_${grid}_${PDY}${cyc}f${fhr3}" - export GRIBIN="${!source_dirvar}/${model}.${cycle}.pgrb2.${grid}.f${fhr3}" - GRIBIN_chk="${GRIBIN}.idx" - - if ! wait_for_file "${GRIBIN_chk}" "${sleep_interval}" "${max_tries}"; then - echo "FATAL ERROR: after 1 hour of waiting for ${GRIBIN_chk} file at F${fhr3} to end." - export err=7 ; err_chk - exit "${err}" - fi - - cp "${GRIBIN}" "grib${fhr3}" - - export pgm="nagrib2 F${fhr3}" - startmsg - - ${NAGRIB} << EOF - GBFILE = grib${fhr3} - INDXFL = - GDOUTF = ${GEMGRD} - PROJ = ${proj} - GRDAREA = ${grdarea} - KXKY = ${kxky} - MAXGRD = ${maxgrd} - CPYFIL = ${cpyfil} - GAREA = ${garea} - OUTPUT = ${output} - GBTBLS = ${gbtbls} - GBDIAG = - PDSEXT = ${pdsext} - l - r + + +GEMGRD="${RUN}_${grid}_${PDY}${cyc}f${fhr3}" +source_dirvar="COM_ATMOS_GRIB_${grid}" +export GRIBIN="${!source_dirvar}/${model}.${cycle}.pgrb2.${grid}.f${fhr3}" +GRIBIN_chk="${GRIBIN}.idx" + +if [[ ! -r "${GRIBIN_chk}" ]]; then + echo "FATAL ERROR: GRIB index file ${GRIBIN_chk} not found!" 
+ export err=7 ; err_chk + exit "${err}" +fi + +cp "${GRIBIN}" "grib${fhr3}" + +export pgm="nagrib2 F${fhr3}" +startmsg + +${NAGRIB} << EOF +GBFILE = grib${fhr3} +INDXFL = +GDOUTF = ${GEMGRD} +PROJ = ${proj} +GRDAREA = ${grdarea} +KXKY = ${kxky} +MAXGRD = ${maxgrd} +CPYFIL = ${cpyfil} +GAREA = ${garea} +OUTPUT = ${output} +GBTBLS = ${gbtbls} +GBDIAG = +PDSEXT = ${pdsext} +l +r EOF - export err=$?; err_chk - - cp "${GEMGRD}" "${destination}/${GEMGRD}" - export err=$? - if (( err != 0 )) ; then - echo "FATAL ERROR: ${GEMGRD} does not exist." - exit "${err}" - fi - - if [[ ${SENDDBN} = "YES" ]] ; then - "${DBNROOT}/bin/dbn_alert" MODEL "${DBN_ALERT_TYPE}" "${job}" \ - "${destination}/${GEMGRD}" - else - echo "##### DBN_ALERT_TYPE is: ${DBN_ALERT_TYPE} #####" - fi - - if (( fhr >= 240 )) ; then - fhr=$((fhr+12)) - else - fhr=$((fhr+finc)) - fi -done -"${GEMEXE}/gpend" -##################################################################### +export err=$?; err_chk +cpfs "${GEMGRD}" "${destination}/${GEMGRD}" +if [[ ${SENDDBN} = "YES" ]] ; then + "${DBNROOT}/bin/dbn_alert" MODEL "${DBN_ALERT_TYPE}" "${job}" \ + "${destination}/${GEMGRD}" +fi + +"${GEMEXE}/gpend" ############################### END OF SCRIPT ####################### diff --git a/scripts/exgfs_atmos_goes_nawips.sh b/scripts/exgfs_atmos_goes_nawips.sh index 2c725a64020..86b0eea7957 100755 --- a/scripts/exgfs_atmos_goes_nawips.sh +++ b/scripts/exgfs_atmos_goes_nawips.sh @@ -1,26 +1,22 @@ #! /usr/bin/env bash ################################################################### -# echo "----------------------------------------------------" # echo "exnawips - convert NCEP GRIB files into GEMPAK Grids" -# echo "----------------------------------------------------" -# echo "History: Mar 2000 - First implementation of this new script." -# echo "S Lilly: May 2008 - add logic to make sure that all of the " -# echo " data produced from the restricted ECMWF" -# echo " data on the CCS is properly protected." -# echo "C. 
Magee: 10/2013 - swap X and Y for rtgssthr Atl and Pac." -##################################################################### +################################################################### source "${USHgfs}/preamble.sh" -cd "${DATA}" || exit 2 +cd "${DATA}" || exit 1 +fhr3=$1 + +# "Import" functions used in this script +source "${USHgfs}/product_functions.sh" for table in g2varswmo2.tbl g2vcrdwmo2.tbl g2varsncep1.tbl g2vcrdncep1.tbl; do cp "${HOMEgfs}/gempak/fix/${table}" "${table}" || \ ( echo "FATAL ERROR: ${table} is missing" && exit 2 ) done -# NAGRIB_TABLE="${HOMEgfs}/gempak/fix/nagrib.tbl" NAGRIB="${GEMEXE}/nagrib2" @@ -48,58 +44,49 @@ else fi pdsext=no -sleep_interval=20 -max_tries=180 -fhr=${fstart} -for (( fhr=fstart; fhr <= fend; fhr=fhr+finc )); do - fhr3=$(printf "%03d" "${fhr}") - GRIBIN="${COM_ATMOS_GOES}/${model}.${cycle}.${GRIB}${fhr3}${EXT}" - GEMGRD="${RUN2}_${PDY}${cyc}f${fhr3}" - - GRIBIN_chk="${GRIBIN}" - - if ! wait_for_file "${GRIBIN_chk}" "${sleep_interval}" "${max_tries}"; then - echo "FATAL ERROR: after 1 hour of waiting for ${GRIBIN_chk} file at F${fhr3} to end." - export err=7 ; err_chk - exit "${err}" - fi - - cp "${GRIBIN}" "grib${fhr3}" - - export pgm="nagrib_nc F${fhr3}" - - ${NAGRIB} << EOF - GBFILE = grib${fhr3} - INDXFL = - GDOUTF = ${GEMGRD} - PROJ = ${proj} - GRDAREA = ${grdarea} - KXKY = ${kxky} - MAXGRD = ${maxgrd} - CPYFIL = ${cpyfil} - GAREA = ${garea} - OUTPUT = ${output} - GBTBLS = ${gbtbls} - GBDIAG = - PDSEXT = ${pdsext} - l - r -EOF - export err=$?;err_chk - "${GEMEXE}/gpend" - cpfs "${GEMGRD}" "${COM_ATMOS_GEMPAK_0p25}/${GEMGRD}" - if [[ ${SENDDBN} == "YES" ]] ; then - "${DBNROOT}/bin/dbn_alert" MODEL "${DBN_ALERT_TYPE}" "${job}" \ - "${COM_ATMOS_GEMPAK_0p25}/${GEMGRD}" - else - echo "##### DBN_ALERT_TYPE is: ${DBN_ALERT_TYPE} #####" - fi +GEMGRD="${RUN2}_${PDY}${cyc}f${fhr3}" +GRIBIN="${COM_ATMOS_GOES}/${model}.${cycle}.${GRIB}${fhr3}${EXT}" +GRIBIN_chk="${GRIBIN}" + +if [[ ! 
-r "${GRIBIN_chk}" ]]; then + echo "FATAL ERROR: GRIB index file ${GRIBIN_chk} not found!" + export err=7 ; err_chk + exit "${err}" +fi + +cp "${GRIBIN}" "grib${fhr3}" + +export pgm="nagrib_nc F${fhr3}" +startmsg + +${NAGRIB} << EOF +GBFILE = grib${fhr3} +INDXFL = +GDOUTF = ${GEMGRD} +PROJ = ${proj} +GRDAREA = ${grdarea} +KXKY = ${kxky} +MAXGRD = ${maxgrd} +CPYFIL = ${cpyfil} +GAREA = ${garea} +OUTPUT = ${output} +GBTBLS = ${gbtbls} +GBDIAG = +PDSEXT = ${pdsext} +l +r +EOF -done +export err=$?; err_chk -##################################################################### +cpfs "${GEMGRD}" "${COM_ATMOS_GEMPAK_0p25}/${GEMGRD}" +if [[ ${SENDDBN} == "YES" ]] ; then + "${DBNROOT}/bin/dbn_alert" MODEL "${DBN_ALERT_TYPE}" "${job}" \ + "${COM_ATMOS_GEMPAK_0p25}/${GEMGRD}" +fi +"${GEMEXE}/gpend" ############################### END OF SCRIPT ####################### diff --git a/scripts/exgfs_atmos_grib2_special_npoess.sh b/scripts/exgfs_atmos_grib2_special_npoess.sh index 8d182469eda..63f5518b548 100755 --- a/scripts/exgfs_atmos_grib2_special_npoess.sh +++ b/scripts/exgfs_atmos_grib2_special_npoess.sh @@ -133,12 +133,9 @@ done ################################################################ # Specify Forecast Hour Range F000 - F180 for GOESSIMPGRB files ################################################################ -export SHOUR=0 -export FHOUR=180 -export FHINC=3 -if (( FHOUR > FHMAX_GFS )); then - export FHOUR="${FHMAX_GFS}" -fi +export SHOUR=${FHMIN_GFS} +export FHOUR=${FHMAX_GOES} +export FHINC=${FHOUT_GOES} ################################# # Process GFS PGRB2_SPECIAL_POST diff --git a/scripts/exgfs_atmos_nawips.sh b/scripts/exgfs_atmos_nawips.sh index 25873473a81..9cf1969f65b 100755 --- a/scripts/exgfs_atmos_nawips.sh +++ b/scripts/exgfs_atmos_nawips.sh @@ -1,14 +1,8 @@ #! 
/usr/bin/env bash ################################################################### -# echo "----------------------------------------------------" # echo "exnawips - convert NCEP GRIB files into GEMPAK Grids" -# echo "----------------------------------------------------" -# echo "History: Mar 2000 - First implementation of this new script." -# echo "S Lilly: May 2008 - add logic to make sure that all of the " -# echo " data produced from the restricted ECMWF" -# echo " data on the CCS is properly protected." -##################################################################### +################################################################### source "${USHgfs}/preamble.sh" "${2}" @@ -19,7 +13,7 @@ export ILPOST=${ILPOST:-1} cd "${DATA}" || exit 1 grid=$1 -fend=$2 +fhr3=$2 DBN_ALERT_TYPE=$3 destination=$4 @@ -30,9 +24,7 @@ cd "${DATA_RUN}" || exit 1 # "Import" functions used in this script source "${USHgfs}/product_functions.sh" -# NAGRIB="${GEMEXE}/nagrib2" -# cpyfil=gds garea=dset @@ -46,68 +38,65 @@ pdsext=no sleep_interval=10 max_tries=360 -fhr=$(( 10#${fstart} )) -while (( fhr <= 10#${fend} )) ; do - - fhr3=$(printf "%03d" "${fhr}") - - if mkdir "lock.${fhr3}" ; then - cd "lock.${fhr3}" || exit 1 - - for table in g2varswmo2.tbl g2vcrdwmo2.tbl g2varsncep1.tbl g2vcrdncep1.tbl; do - cp "${HOMEgfs}/gempak/fix/${table}" "${table}" || \ - ( echo "FATAL ERROR: ${table} is missing" && exit 2 ) - done - - GEMGRD="${RUN}_${grid}_${PDY}${cyc}f${fhr3}" - - # Set type of Interpolation for WGRIB2 - export opt1=' -set_grib_type same -new_grid_winds earth ' - export opt1uv=' -set_grib_type same -new_grid_winds grid ' - export opt21=' -new_grid_interpolation bilinear -if ' - export opt22=":(CSNOW|CRAIN|CFRZR|CICEP|ICSEV):" - export opt23=' -new_grid_interpolation neighbor -fi ' - export opt24=' -set_bitmap 1 -set_grib_max_bits 16 -if ' - export opt25=":(APCP|ACPCP|PRATE|CPRAT):" - export opt26=' -set_grib_max_bits 25 -fi -if ' - export 
opt27=":(APCP|ACPCP|PRATE|CPRAT|DZDT):" - export opt28=' -new_grid_interpolation budget -fi ' - - case ${grid} in - # TODO: Why aren't we interpolating from the 0p25 grids for 35-km and 40-km? - '0p50' | '0p25') grid_in=${grid};; - *) grid_in="1p00";; - esac - - source_var="COM_ATMOS_GRIB_${grid_in}" - export GRIBIN="${!source_var}/${model}.${cycle}.pgrb2.${grid_in}.f${fhr3}" - GRIBIN_chk="${!source_var}/${model}.${cycle}.pgrb2.${grid_in}.f${fhr3}.idx" - - if ! wait_for_file "${GRIBIN_chk}" "${sleep_interval}" "${max_tries}"; then - echo "FATAL ERROR: after 1 hour of waiting for ${GRIBIN_chk} file at F${fhr3} to end." - export err=7 ; err_chk - exit "${err}" - fi - - case "${grid}" in - 35km_pac) grid_spec='latlon 130.0:416:0.312 75.125:186:-0.312';; - 35km_atl) grid_spec='latlon 230.0:480:0.312 75.125:242:-0.312';; - 40km) grid_spec='lambert:265.0:25.0:25.0 226.541:185:40635.0 12.19:129:40635.0';; - *) grid_spec='';; - esac - - if [[ "${grid_spec}" != "" ]]; then - # shellcheck disable=SC2086,SC2248 - "${WGRIB2}" "${GRIBIN}" ${opt1uv} ${opt21} ${opt22} ${opt23} ${opt24} ${opt25} ${opt26} ${opt27} ${opt28} -new_grid ${grid_spec} "grib${fhr3}" - trim_rh "grib${fhr3}" - else - cp "${GRIBIN}" "grib${fhr3}" - fi - - export pgm="nagrib2 F${fhr3}" - startmsg - - ${NAGRIB} << EOF + + +mkdir -p "lock.${fhr3}" +cd "lock.${fhr3}" || exit 1 + +for table in g2varswmo2.tbl g2vcrdwmo2.tbl g2varsncep1.tbl g2vcrdncep1.tbl; do + cp "${HOMEgfs}/gempak/fix/${table}" "${table}" || \ + ( echo "FATAL ERROR: ${table} is missing" && exit 2 ) +done + +GEMGRD="${RUN}_${grid}_${PDY}${cyc}f${fhr3}" + +# Set type of Interpolation for WGRIB2 +export opt1=' -set_grib_type same -new_grid_winds earth ' +export opt1uv=' -set_grib_type same -new_grid_winds grid ' +export opt21=' -new_grid_interpolation bilinear -if ' +export opt22=":(CSNOW|CRAIN|CFRZR|CICEP|ICSEV):" +export opt23=' -new_grid_interpolation neighbor -fi ' +export opt24=' -set_bitmap 1 -set_grib_max_bits 16 -if ' +export 
opt25=":(APCP|ACPCP|PRATE|CPRAT):" +export opt26=' -set_grib_max_bits 25 -fi -if ' +export opt27=":(APCP|ACPCP|PRATE|CPRAT|DZDT):" +export opt28=' -new_grid_interpolation budget -fi ' + +case ${grid} in + # TODO: Why aren't we interpolating from the 0p25 grids for 35-km and 40-km? + '0p50' | '0p25') grid_in=${grid};; + *) grid_in="1p00";; +esac + +source_var="COM_ATMOS_GRIB_${grid_in}" +export GRIBIN="${!source_var}/${model}.${cycle}.pgrb2.${grid_in}.f${fhr3}" +GRIBIN_chk="${!source_var}/${model}.${cycle}.pgrb2.${grid_in}.f${fhr3}.idx" + +if ! wait_for_file "${GRIBIN_chk}" "${sleep_interval}" "${max_tries}"; then + echo "FATAL ERROR: after 1 hour of waiting for ${GRIBIN_chk} file at F${fhr3} to end." + export err=7 ; err_chk + exit "${err}" +fi + +case "${grid}" in + 35km_pac) grid_spec='latlon 130.0:416:0.312 75.125:186:-0.312';; + 35km_atl) grid_spec='latlon 230.0:480:0.312 75.125:242:-0.312';; + 40km) grid_spec='lambert:265.0:25.0:25.0 226.541:185:40635.0 12.19:129:40635.0';; + *) grid_spec='';; +esac + +if [[ "${grid_spec}" != "" ]]; then + # shellcheck disable=SC2086,SC2248 + "${WGRIB2}" "${GRIBIN}" ${opt1uv} ${opt21} ${opt22} ${opt23} ${opt24} ${opt25} ${opt26} ${opt27} ${opt28} -new_grid ${grid_spec} "grib${fhr3}" + trim_rh "grib${fhr3}" +else + cp "${GRIBIN}" "grib${fhr3}" +fi + +export pgm="nagrib2 F${fhr3}" +startmsg + +${NAGRIB} << EOF GBFILE = grib${fhr3} INDXFL = GDOUTF = ${GEMGRD} @@ -124,31 +113,16 @@ PDSEXT = ${pdsext} l r EOF - export err=$?;err_chk - - cpfs "${GEMGRD}" "${destination}/${GEMGRD}" - if [[ ${SENDDBN} == "YES" ]] ; then - "${DBNROOT}/bin/dbn_alert" MODEL "${DBN_ALERT_TYPE}" "${job}" \ - "${destination}/${GEMGRD}" - fi - cd "${DATA_RUN}" || exit 1 - else - if (( fhr >= 240 )) ; then - if (( fhr < 276 )) && [[ "${grid}" = "0p50" ]] ; then - fhr=$((fhr+6)) - else - fhr=$((fhr+12)) - fi - elif ((fhr < 120)) && [[ "${grid}" = "0p25" ]] ; then - fhr=$((fhr + ILPOST)) - else - fhr=$((ILPOST > finc ? 
fhr+ILPOST : fhr+finc )) - fi - fi -done -"${GEMEXE}/gpend" -##################################################################### +export err=$?;err_chk +cpfs "${GEMGRD}" "${destination}/${GEMGRD}" +if [[ ${SENDDBN} == "YES" ]] ; then + "${DBNROOT}/bin/dbn_alert" MODEL "${DBN_ALERT_TYPE}" "${job}" \ + "${destination}/${GEMGRD}" +fi +cd "${DATA_RUN}" || exit 1 + +"${GEMEXE}/gpend" ############################### END OF SCRIPT ####################### diff --git a/sorc/gdas.cd b/sorc/gdas.cd index e3644a98c36..368c9c5db9b 160000 --- a/sorc/gdas.cd +++ b/sorc/gdas.cd @@ -1 +1 @@ -Subproject commit e3644a98c362d7321f9e3081a4e55947885ed2bf +Subproject commit 368c9c5db9b5ea62e72937b6d1b0f753adb9be40 diff --git a/sorc/wxflow b/sorc/wxflow index 5dad7dd61ce..8406beeea41 160000 --- a/sorc/wxflow +++ b/sorc/wxflow @@ -1 +1 @@ -Subproject commit 5dad7dd61cebd9b3f2b163b3b06bb75eae1860a9 +Subproject commit 8406beeea410118cdfbd8300895b2b2878eadba6 diff --git a/ush/detect_machine.sh b/ush/detect_machine.sh index cfd0fa97e27..b049a6040ef 100755 --- a/ush/detect_machine.sh +++ b/ush/detect_machine.sh @@ -45,7 +45,7 @@ case $(hostname -f) in *) MACHINE_ID=UNKNOWN ;; # Unknown platform esac -if [[ ${MACHINE_ID} == "UNKNOWN" ]]; then +if [[ ${MACHINE_ID} == "UNKNOWN" ]]; then case ${PW_CSP:-} in "aws" | "google" | "azure") MACHINE_ID=noaacloud ;; *) PW_CSP="UNKNOWN" @@ -75,7 +75,7 @@ elif [[ -d /scratch1 ]]; then MACHINE_ID=hera elif [[ -d /work ]]; then # We are on MSU Orion or Hercules - mount=$(findmnt -n -o SOURCE /home) + mount=$(findmnt -n -o SOURCE /home) if [[ ${mount} =~ "hercules" ]]; then MACHINE_ID=hercules else diff --git a/ush/parsing_namelists_CICE.sh b/ush/parsing_namelists_CICE.sh index aa495d1864c..3822094c97c 100755 --- a/ush/parsing_namelists_CICE.sh +++ b/ush/parsing_namelists_CICE.sh @@ -59,6 +59,7 @@ local CICE_RUNID="unknown" local CICE_USE_RESTART_TIME=${use_restart_time} local CICE_RESTART_DIR="./CICE_RESTART/" local CICE_RESTART_FILE="cice_model.res" +local 
CICE_ICE_IC='cice_model.res.nc' local CICE_RESTART_DEFLATE=0 local CICE_RESTART_CHUNK=0,0 local CICE_RESTART_STRIDE=-99 @@ -117,6 +118,12 @@ local CICE_NPROC=${ntasks_cice6} local CICE_BLCKX=${block_size_x} local CICE_BLCKY=${block_size_y} local CICE_DECOMP=${processor_shape} +# ice_prescribed_nml section +local CICE_PRESCRIBED="false" +local MESH_DICE="none" +local stream_files_dice="none" + + # Ensure the template exists local template=${CICE_TEMPLATE:-"${PARMgfs}/ufs/ice_in.IN"} diff --git a/ush/parsing_ufs_configure.sh b/ush/parsing_ufs_configure.sh index 24ea80e56c9..062b40591e3 100755 --- a/ush/parsing_ufs_configure.sh +++ b/ush/parsing_ufs_configure.sh @@ -39,6 +39,8 @@ if [[ "${cpl}" = ".true." ]]; then local coupling_interval_slow_sec="${CPL_SLOW}" fi +local WRITE_ENDOFRUN_RESTART=.false. + if [[ "${cplflx}" = ".true." ]]; then local use_coldstart=${use_coldstart:-".false."} @@ -56,6 +58,7 @@ if [[ "${cplflx}" = ".true." ]]; then local ATMTILESIZE="${CASE:1}" local ocean_albedo_limit=0.06 local pio_rearranger=${pio_rearranger:-"box"} + local MED_history_n=1000000 fi if [[ "${cplice}" = ".true." 
]]; then diff --git a/ush/python/pygfs/task/marine_letkf.py b/ush/python/pygfs/task/marine_letkf.py index 0ae5bea98d1..0fdd3d9aba5 100644 --- a/ush/python/pygfs/task/marine_letkf.py +++ b/ush/python/pygfs/task/marine_letkf.py @@ -1,11 +1,16 @@ #!/usr/bin/env python3 +import f90nml from logging import getLogger +import os from pygfs.task.analysis import Analysis from typing import Dict -from wxflow import (chdir, +from wxflow import (AttrDict, + FileHandler, logit, - Task) + parse_j2yaml, + to_timedelta, + to_YMDH) logger = getLogger(__name__.split('.')[-1]) @@ -30,6 +35,21 @@ def __init__(self, config: Dict) -> None: logger.info("init") super().__init__(config) + _half_assim_freq = to_timedelta(f"{self.task_config.assim_freq}H") / 2 + _letkf_yaml_file = 'letkf.yaml' + _letkf_exec_args = [self.task_config.MARINE_LETKF_EXEC, + 'soca', + 'localensembleda', + _letkf_yaml_file] + + self.task_config.WINDOW_MIDDLE = self.task_config.current_cycle + self.task_config.WINDOW_BEGIN = self.task_config.current_cycle - _half_assim_freq + self.task_config.letkf_exec_args = _letkf_exec_args + self.task_config.letkf_yaml_file = _letkf_yaml_file + self.task_config.mom_input_nml_tmpl = os.path.join(self.task_config.DATA, 'mom_input.nml.tmpl') + self.task_config.mom_input_nml = os.path.join(self.task_config.DATA, 'mom_input.nml') + self.task_config.obs_dir = os.path.join(self.task_config.DATA, 'obs') + @logit(logger) def initialize(self): """Method initialize for ocean and sea ice LETKF task @@ -43,6 +63,63 @@ def initialize(self): logger.info("initialize") + # make directories and stage ensemble background files + ensbkgconf = AttrDict() + keys = ['previous_cycle', 'current_cycle', 'DATA', 'NMEM_ENS', + 'PARMgfs', 'ROTDIR', 'COM_OCEAN_HISTORY_TMPL', 'COM_ICE_HISTORY_TMPL'] + for key in keys: + ensbkgconf[key] = self.task_config[key] + ensbkgconf.RUN = 'enkfgdas' + soca_ens_bkg_stage_list = parse_j2yaml(self.task_config.SOCA_ENS_BKG_STAGE_YAML_TMPL, ensbkgconf) + 
FileHandler(soca_ens_bkg_stage_list).sync() + soca_fix_stage_list = parse_j2yaml(self.task_config.SOCA_FIX_STAGE_YAML_TMPL, self.task_config) + FileHandler(soca_fix_stage_list).sync() + letkf_stage_list = parse_j2yaml(self.task_config.MARINE_LETKF_STAGE_YAML_TMPL, self.task_config) + FileHandler(letkf_stage_list).sync() + + obs_list = parse_j2yaml(self.task_config.OBS_YAML, self.task_config) + + # get the list of observations + obs_files = [] + for ob in obs_list['observers']: + obs_name = ob['obs space']['name'].lower() + obs_filename = f"{self.task_config.RUN}.t{self.task_config.cyc}z.{obs_name}.{to_YMDH(self.task_config.current_cycle)}.nc" + obs_files.append((obs_filename, ob)) + + obs_files_to_copy = [] + obs_to_use = [] + # copy obs from COMIN_OBS to DATA/obs + for obs_file, ob in obs_files: + obs_src = os.path.join(self.task_config.COMIN_OBS, obs_file) + obs_dst = os.path.join(self.task_config.DATA, self.task_config.obs_dir, obs_file) + if os.path.exists(obs_src): + obs_files_to_copy.append([obs_src, obs_dst]) + obs_to_use.append(ob) + else: + logger.warning(f"{obs_file} is not available in {self.task_config.COMIN_OBS}") + + # stage the desired obs files + FileHandler({'copy': obs_files_to_copy}).sync() + + # make the letkf.yaml + letkfconf = AttrDict() + keys = ['WINDOW_BEGIN', 'WINDOW_MIDDLE', 'RUN', 'gcyc', 'NMEM_ENS'] + for key in keys: + letkfconf[key] = self.task_config[key] + letkfconf.RUN = 'enkfgdas' + letkf_yaml = parse_j2yaml(self.task_config.MARINE_LETKF_YAML_TMPL, letkfconf) + letkf_yaml.observations.observers = obs_to_use + letkf_yaml.save(self.task_config.letkf_yaml_file) + + # swap date and stack size in mom_input.nml + domain_stack_size = self.task_config.DOMAIN_STACK_SIZE + ymdhms = [int(s) for s in self.task_config.WINDOW_BEGIN.strftime('%Y,%m,%d,%H,%M,%S').split(',')] + with open(self.task_config.mom_input_nml_tmpl, 'r') as nml_file: + nml = f90nml.read(nml_file) + nml['ocean_solo_nml']['date_init'] = ymdhms + 
nml['fms_nml']['domains_stack_size'] = int(domain_stack_size) + nml.write(self.task_config.mom_input_nml, force=True) # force to overwrite if necessary + @logit(logger) def run(self): """Method run for ocean and sea ice LETKF task @@ -56,8 +133,6 @@ def run(self): logger.info("run") - chdir(self.runtime_config.DATA) - @logit(logger) def finalize(self): """Method finalize for ocean and sea ice LETKF task diff --git a/versions/build.orion.ver b/versions/build.orion.ver index df7856110d9..834ecfc1668 100644 --- a/versions/build.orion.ver +++ b/versions/build.orion.ver @@ -1,5 +1,5 @@ -export stack_intel_ver=2022.0.2 -export stack_impi_ver=2021.5.1 -export spack_env=gsi-addon-env +export stack_intel_ver=2021.9.0 +export stack_impi_ver=2021.9.0 +export spack_env=gsi-addon-env-rocky9 source "${HOMEgfs:-}/versions/build.spack.ver" export spack_mod_path="/work/noaa/epic/role-epic/spack-stack/orion/spack-stack-${spack_stack_ver}/envs/${spack_env}/install/modulefiles/Core" diff --git a/versions/run.noaacloud.ver b/versions/run.noaacloud.ver index 7c23da0e7ab..4c9ac3cd428 100644 --- a/versions/run.noaacloud.ver +++ b/versions/run.noaacloud.ver @@ -2,10 +2,7 @@ export stack_intel_ver=2021.3.0 export stack_impi_ver=2021.3.0 export spack_env=gsi-addon-env -export gempak_ver=7.4.2 - source "${HOMEgfs:-}/versions/run.spack.ver" export spack_mod_path="/contrib/spack-stack/spack-stack-${spack_stack_ver}/envs/gsi-addon-env/install/modulefiles/Core" -export ncl_ver=6.6.2 export cdo_ver=2.2.0 diff --git a/versions/run.orion.ver b/versions/run.orion.ver index 2fdeae8888e..112636fb20f 100644 --- a/versions/run.orion.ver +++ b/versions/run.orion.ver @@ -1,12 +1,6 @@ -export stack_intel_ver=2022.0.2 -export stack_impi_ver=2021.5.1 -export spack_env=gsi-addon-env - -export ncl_ver=6.6.2 -export gempak_ver=7.5.1 +export stack_intel_ver=2021.9.0 +export stack_impi_ver=2021.9.0 +export spack_env=gsi-addon-env-rocky9 source "${HOMEgfs:-}/versions/run.spack.ver" export 
spack_mod_path="/work/noaa/epic/role-epic/spack-stack/orion/spack-stack-${spack_stack_ver}/envs/${spack_env}/install/modulefiles/Core" - -#cdo is older on Orion -export cdo_ver=2.0.5 diff --git a/versions/run.spack.ver b/versions/run.spack.ver index 5640f7f5f86..9aa5460c804 100644 --- a/versions/run.spack.ver +++ b/versions/run.spack.ver @@ -29,7 +29,7 @@ export metplus_ver=3.1.1 export py_xarray_ver=2023.7.0 export obsproc_run_ver=1.1.2 -export prepobs_run_ver=1.0.1 +export prepobs_run_ver=1.0.2 export ens_tracker_ver=feature-GFSv17_com_reorg -export fit2obs_ver=1.1.1 +export fit2obs_ver=1.1.2 diff --git a/versions/run.wcoss2.ver b/versions/run.wcoss2.ver index 18599222bf2..7f653dd50e7 100644 --- a/versions/run.wcoss2.ver +++ b/versions/run.wcoss2.ver @@ -45,9 +45,9 @@ export metplus_ver=3.1.1 # Development-only below export obsproc_run_ver=1.1.2 -export prepobs_run_ver=1.0.1 +export prepobs_run_ver=1.0.2 export ens_tracker_ver=feature-GFSv17_com_reorg -export fit2obs_ver=1.0.0 +export fit2obs_ver=1.1.2 export mos_ver=5.4.3 export mos_shared_ver=2.7.2 diff --git a/workflow/hosts/awspw.yaml b/workflow/hosts/awspw.yaml index 2b065a2d61f..20d87d5acc2 100644 --- a/workflow/hosts/awspw.yaml +++ b/workflow/hosts/awspw.yaml @@ -14,6 +14,7 @@ QUEUE_SERVICE: batch PARTITION_BATCH: compute PARTITION_SERVICE: compute RESERVATION: '' +CLUSTERS: '' CHGRP_RSTPROD: 'YES' CHGRP_CMD: 'chgrp rstprod' # TODO: This is not yet supported. 
HPSSARCH: 'NO' diff --git a/workflow/hosts/container.yaml b/workflow/hosts/container.yaml index 5f4a66ac1fa..907f69754ec 100644 --- a/workflow/hosts/container.yaml +++ b/workflow/hosts/container.yaml @@ -14,6 +14,7 @@ QUEUE_SERVICE: '' PARTITION_BATCH: '' PARTITION_SERVICE: '' RESERVATION: '' +CLUSTERS: '' CHGRP_RSTPROD: 'YES' CHGRP_CMD: 'chgrp rstprod' HPSSARCH: 'NO' diff --git a/workflow/hosts/hera.yaml b/workflow/hosts/hera.yaml index 8cf73636058..76a7158f433 100644 --- a/workflow/hosts/hera.yaml +++ b/workflow/hosts/hera.yaml @@ -16,6 +16,7 @@ PARTITION_BATCH: hera PARTITION_SERVICE: service RESERVATION: '' CHGRP_RSTPROD: 'YES' +CLUSTERS: '' CHGRP_CMD: 'chgrp rstprod' HPSSARCH: 'YES' HPSS_PROJECT: emc-global diff --git a/workflow/hosts/hercules.yaml b/workflow/hosts/hercules.yaml index adebdfe23d4..975558160f1 100644 --- a/workflow/hosts/hercules.yaml +++ b/workflow/hosts/hercules.yaml @@ -16,6 +16,7 @@ PARTITION_BATCH: hercules PARTITION_SERVICE: service RESERVATION: '' CHGRP_RSTPROD: 'YES' +CLUSTERS: '' CHGRP_CMD: 'chgrp rstprod' HPSSARCH: 'NO' HPSS_PROJECT: emc-global diff --git a/workflow/hosts/jet.yaml b/workflow/hosts/jet.yaml index fd556fadc7e..b526e073c3b 100644 --- a/workflow/hosts/jet.yaml +++ b/workflow/hosts/jet.yaml @@ -16,6 +16,7 @@ PARTITION_BATCH: kjet PARTITION_SERVICE: service RESERVATION: '' CHGRP_RSTPROD: 'YES' +CLUSTERS: '' CHGRP_CMD: 'chgrp rstprod' HPSSARCH: 'YES' HPSS_PROJECT: emc-global diff --git a/workflow/hosts/orion.yaml b/workflow/hosts/orion.yaml index ba289df1e39..fe36c8e7ce1 100644 --- a/workflow/hosts/orion.yaml +++ b/workflow/hosts/orion.yaml @@ -16,6 +16,7 @@ PARTITION_BATCH: orion PARTITION_SERVICE: service RESERVATION: '' CHGRP_RSTPROD: 'YES' +CLUSTERS: '' CHGRP_CMD: 'chgrp rstprod' HPSSARCH: 'NO' HPSS_PROJECT: emc-global diff --git a/workflow/hosts/s4.yaml b/workflow/hosts/s4.yaml index 543912cf238..37479fa13c6 100644 --- a/workflow/hosts/s4.yaml +++ b/workflow/hosts/s4.yaml @@ -16,6 +16,7 @@ PARTITION_BATCH: s4 
PARTITION_SERVICE: serial RESERVATION: '' CHGRP_RSTPROD: 'NO' +CLUSTERS: '' CHGRP_CMD: 'ls' HPSSARCH: 'NO' HPSS_PROJECT: emc-global diff --git a/workflow/hosts/wcoss2.yaml b/workflow/hosts/wcoss2.yaml index 49434952894..e3650e47107 100644 --- a/workflow/hosts/wcoss2.yaml +++ b/workflow/hosts/wcoss2.yaml @@ -16,6 +16,7 @@ PARTITION_BATCH: '' PARTITION_SERVICE: '' RESERVATION: '' CHGRP_RSTPROD: 'YES' +CLUSTERS: '' CHGRP_CMD: 'chgrp rstprod' HPSSARCH: 'NO' HPSS_PROJECT: emc-global diff --git a/workflow/rocoto/gfs_tasks.py b/workflow/rocoto/gfs_tasks.py index 99299950d0d..46db0235e51 100644 --- a/workflow/rocoto/gfs_tasks.py +++ b/workflow/rocoto/gfs_tasks.py @@ -27,7 +27,9 @@ def stage_ic(self): #The if block below is added for AWS. #If we have a proper way to define 'BASE_CPLIC', this if block can be removed. if ('BASE_CPLIC' not in cpl_ic.keys()): - cpl_ic['BASE_CPLIC'] = os.environ.get('BASE_CPLIC', '/contrib/Wei.Huang/data/ICDIRS/prototype_ICs') + cpl_ic['BASE_CPLIC'] = os.environ.get('BASE_CPLIC', '/bucket/global-workflow-shared-data/ICSDIR/prototype_ICs') + if ('CPL_ATMIC' not in cpl_ic.keys()): + cpl_ic['CPL_ATMIC'] = os.environ.get('CPL_ATMIC', 'workflow_C48_refactored') prefix = f"{cpl_ic['BASE_CPLIC']}/{cpl_ic['CPL_ATMIC']}/@Y@m@d@H/atmos" for file in ['gfs_ctrl.nc'] + \ [f'{datatype}_data.tile{tile}.nc' @@ -663,7 +665,9 @@ def ocnanalprep(self): deps = [] dep_dict = {'type': 'task', 'name': f'{self.cdump}prepoceanobs'} deps.append(rocoto.add_dependency(dep_dict)) - dependencies = rocoto.create_dependency(dep=deps) + dep_dict = {'type': 'task', 'name': 'gdasfcst', 'offset': f"-{timedelta_to_HMS(self._base['cycle_interval'])}"} + deps.append(rocoto.add_dependency(dep_dict)) + dependencies = rocoto.create_dependency(dep_condition='and', dep=deps) resources = self.get_resource('ocnanalprep') task_name = f'{self.cdump}ocnanalprep' @@ -1448,16 +1452,21 @@ def awips_20km_1p0deg(self): def gempak(self): deps = [] - dep_dict = {'type': 'metatask', 'name': 
f'{self.cdump}atmos_prod'} + dep_dict = {'type': 'task', 'name': f'{self.cdump}atmos_prod_f#fhr#'} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) + gempak_vars = self.envars.copy() + gempak_dict = {'FHR3': '#fhr#'} + for key, value in gempak_dict.items(): + gempak_vars.append(rocoto.create_envar(name=key, value=str(value))) + resources = self.get_resource('gempak') - task_name = f'{self.cdump}gempak' + task_name = f'{self.cdump}gempak_f#fhr#' task_dict = {'task_name': task_name, 'resources': resources, 'dependency': dependencies, - 'envars': self.envars, + 'envars': gempak_vars, 'cycledef': self.cdump.replace('enkf', ''), 'command': f'{self.HOMEgfs}/jobs/rocoto/gempak.sh', 'job_name': f'{self.pslot}_{task_name}_@H', @@ -1465,13 +1474,20 @@ def gempak(self): 'maxtries': '&MAXTRIES;' } - task = rocoto.create_task(task_dict) + fhrs = self._get_forecast_hours(self.cdump, self._configs['gempak']) + fhr_var_dict = {'fhr': ' '.join([f"{fhr:03d}" for fhr in fhrs])} + + fhr_metatask_dict = {'task_name': f'{self.cdump}gempak', + 'task_dict': task_dict, + 'var_dict': fhr_var_dict} + + task = rocoto.create_task(fhr_metatask_dict) return task def gempakmeta(self): deps = [] - dep_dict = {'type': 'task', 'name': f'{self.cdump}gempak'} + dep_dict = {'type': 'metatask', 'name': f'{self.cdump}gempak'} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) @@ -1494,7 +1510,7 @@ def gempakmeta(self): def gempakmetancdc(self): deps = [] - dep_dict = {'type': 'task', 'name': f'{self.cdump}gempak'} + dep_dict = {'type': 'metatask', 'name': f'{self.cdump}gempak'} deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) @@ -1517,7 +1533,7 @@ def gempakmetancdc(self): def gempakncdcupapgif(self): deps = [] - dep_dict = {'type': 'task', 'name': f'{self.cdump}gempak'} + dep_dict = {'type': 'metatask', 'name': f'{self.cdump}gempak'} 
deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) @@ -1544,12 +1560,17 @@ def gempakpgrb2spec(self): deps.append(rocoto.add_dependency(dep_dict)) dependencies = rocoto.create_dependency(dep=deps) + gempak_vars = self.envars.copy() + gempak_dict = {'FHR3': '#fhr#'} + for key, value in gempak_dict.items(): + gempak_vars.append(rocoto.create_envar(name=key, value=str(value))) + resources = self.get_resource('gempak') - task_name = f'{self.cdump}gempakgrb2spec' + task_name = f'{self.cdump}gempakgrb2spec_f#fhr#' task_dict = {'task_name': task_name, 'resources': resources, 'dependency': dependencies, - 'envars': self.envars, + 'envars': gempak_vars, 'cycledef': self.cdump.replace('enkf', ''), 'command': f'{self.HOMEgfs}/jobs/rocoto/gempakgrb2spec.sh', 'job_name': f'{self.pslot}_{task_name}_@H', @@ -1557,7 +1578,23 @@ def gempakpgrb2spec(self): 'maxtries': '&MAXTRIES;' } - task = rocoto.create_task(task_dict) + # Override forecast lengths locally to be that of gempak goes job + local_config = self._configs['gempak'] + goes_times = { + 'FHMAX_HF_GFS': 0, + 'FHMAX_GFS': local_config['FHMAX_GOES'], + 'FHOUT_GFS': local_config['FHOUT_GOES'], + } + local_config.update(goes_times) + + fhrs = self._get_forecast_hours(self.cdump, local_config) + fhr_var_dict = {'fhr': ' '.join([f"{fhr:03d}" for fhr in fhrs])} + + fhr_metatask_dict = {'task_name': f'{self.cdump}gempakgrb2spec', + 'task_dict': task_dict, + 'var_dict': fhr_var_dict} + + task = rocoto.create_task(fhr_metatask_dict) return task @@ -2240,7 +2277,7 @@ def cleanup(self): dep_dict = {'type': 'task', 'name': f'{self.cdump}gempakncdcupapgif'} deps.append(rocoto.add_dependency(dep_dict)) if self.app_config.do_goes: - dep_dict = {'type': 'task', 'name': f'{self.cdump}gempakgrb2spec'} + dep_dict = {'type': 'metatask', 'name': f'{self.cdump}gempakgrb2spec'} deps.append(rocoto.add_dependency(dep_dict)) dep_dict = {'type': 'task', 'name': f'{self.cdump}npoess_pgrb2_0p5deg'} 
deps.append(rocoto.add_dependency(dep_dict)) diff --git a/workflow/rocoto/tasks.py b/workflow/rocoto/tasks.py index 253de03a4e4..dc6d3613cf1 100644 --- a/workflow/rocoto/tasks.py +++ b/workflow/rocoto/tasks.py @@ -245,6 +245,7 @@ def get_resource(self, task_name): #as below. Or, it won't run, but with an error: #"ufs_model.x: error while loading shared libraries: libiomp5.so: cannot open shared object file: No such file or directory" #Even the library path is clearly in LD_LIBRARY_PATH, or load exactly the modules when build ufs_model.x + #TODO: find a mechanism to provide native scheduler information. pw_csp = os.environ.get('PW_CSP', 'unknown') if ( pw_csp in ['aws', 'azure', 'google'] ): native = '--export=ALL --exclusive' @@ -252,7 +253,7 @@ def get_resource(self, task_name): native = '--export=NONE' if task_config['RESERVATION'] != "": native += '' if task_name in Tasks.SERVICE_TASKS else ' --reservation=' + task_config['RESERVATION'] - if task_config['CLUSTERS'] not in ["", '@CLUSTERS@']: + if task_config.get('CLUSTERS', "") not in ["", '@CLUSTERS@']: native += ' --clusters=' + task_config['CLUSTERS'] queue = task_config['QUEUE_SERVICE'] if task_name in Tasks.SERVICE_TASKS else task_config['QUEUE']