diff --git a/.github/workflows/unit_tests.yaml b/.github/workflows/unit_tests.yaml
index d3b127dc..e780c190 100644
--- a/.github/workflows/unit_tests.yaml
+++ b/.github/workflows/unit_tests.yaml
@@ -22,6 +22,10 @@ jobs:
       - name: Install Python packages
         run: pip3 install .[test]
 
+      - name: Prepare input eta files
+        run: |
+          python tests/grid/generate_eta_files.py
+
       - name: Run serial-cpu tests
         run: coverage run --rcfile=setup.cfg -m pytest -x tests
 
diff --git a/external/dace b/external/dace
index ee5a6dfe..da644fe8 160000
--- a/external/dace
+++ b/external/dace
@@ -1 +1 @@
-Subproject commit ee5a6dfe695f329c3882105b087f3563a0c80b81
+Subproject commit da644fe8c179022fe8e730fb3f47f6399f1db4ce
diff --git a/external/gt4py b/external/gt4py
index 32dde792..0ddddd37 160000
--- a/external/gt4py
+++ b/external/gt4py
@@ -1 +1 @@
-Subproject commit 32dde792bde505807a5729261e4f1d12a1451bdb
+Subproject commit 0ddddd37d3056ad6518f33908eb02f3b1f992878
diff --git a/ndsl/grid/eta.py b/ndsl/grid/eta.py
index 90db8c4a..35ac510d 100644
--- a/ndsl/grid/eta.py
+++ b/ndsl/grid/eta.py
@@ -30,10 +30,8 @@ class HybridPressureCoefficients:
 
 
 def _load_ak_bk_from_file(eta_file: str) -> Tuple[np.ndarray, np.ndarray]:
-    if eta_file == "None":
-        raise ValueError("eta file not specified")
     if not os.path.isfile(eta_file):
-        raise ValueError("file " + eta_file + " does not exist")
+        raise ValueError(f"eta file {eta_file} does not exist")
 
     # read file into ak, bk arrays
     data = xr.open_dataset(eta_file)
diff --git a/ndsl/grid/generation.py b/ndsl/grid/generation.py
index 275db563..75437272 100644
--- a/ndsl/grid/generation.py
+++ b/ndsl/grid/generation.py
@@ -237,7 +237,7 @@ def __init__(
         dy_const: float = 1000.0,
         deglat: float = 15.0,
         extdgrid: bool = False,
-        eta_file: str = "None",
+        eta_file: Optional[str] = None,
         ak: Optional[np.ndarray] = None,
         bk: Optional[np.ndarray] = None,
     ):
@@ -297,12 +297,34 @@ def __init__(
         self._dy_center = None
         self._area = None
         self._area_c = None
-        (
-            self._ks,
-            self._ptop,
-            self._ak,
-            self._bk,
-        ) = self._set_hybrid_pressure_coefficients(eta_file, ak, bk)
+        if eta_file is not None:
+            (
+                self._ks,
+                self._ptop,
+                self._ak,
+                self._bk,
+            ) = self._set_hybrid_pressure_coefficients(eta_file, ak, bk)
+        else:
+            self._ks = self.quantity_factory.zeros(
+                [],
+                "",
+                dtype=Float,
+            )
+            self._ptop = self.quantity_factory.zeros(
+                [],
+                "Pa",
+                dtype=Float,
+            )
+            self._ak = self.quantity_factory.zeros(
+                [Z_INTERFACE_DIM],
+                "Pa",
+                dtype=Float,
+            )
+            self._bk = self.quantity_factory.zeros(
+                [Z_INTERFACE_DIM],
+                "",
+                dtype=Float,
+            )
         self._ec1 = None
         self._ec2 = None
         self._ew1 = None
diff --git a/ndsl/stencils/corners.py b/ndsl/stencils/corners.py
index d83cfac5..5eb7767a 100644
--- a/ndsl/stencils/corners.py
+++ b/ndsl/stencils/corners.py
@@ -1003,6 +1003,7 @@ def fill_corners_dgrid_defn(
     with computation(PARALLEL), interval(...):
         # this line of code is used to fix the missing symbol crash due to the node visitor depth limitation
         acoef = mysign
+        x_out = x_out
         # sw corner
         with horizontal(region[i_start - 1, j_start - 1]):
             x_out = mysign * y_in[0, 1, 0]
diff --git a/ndsl/stencils/testing/test_translate.py b/ndsl/stencils/testing/test_translate.py
index 55ee1001..db8e6047 100644
--- a/ndsl/stencils/testing/test_translate.py
+++ b/ndsl/stencils/testing/test_translate.py
@@ -210,13 +210,19 @@ def test_sequential_savepoint(
             near_zero=case.testobj.near_zero,
         )
         if not metric.check:
+            os.makedirs(OUTDIR, exist_ok=True)
+            log_filename = os.path.join(
+                OUTDIR,
f"details-{case.savepoint_name}-{varname}-rank{case.rank}.log", + ) + metric.report(log_filename) pytest.fail(str(metric), pytrace=False) passing_names.append(failing_names.pop()) ref_data_out[varname] = [ref_data] if len(failing_names) > 0: get_thresholds(case.testobj, input_data=original_input_data) os.makedirs(OUTDIR, exist_ok=True) - out_filename = os.path.join(OUTDIR, f"translate-{case.savepoint_name}.nc") + nc_filename = os.path.join(OUTDIR, f"translate-{case.savepoint_name}.nc") input_data_on_host = {} for key, _input in input_data.items(): input_data_on_host[key] = gt_utils.asarray(_input) @@ -226,7 +232,7 @@ def test_sequential_savepoint( [output], ref_data_out, failing_names, - out_filename, + nc_filename, ) if failing_names != []: pytest.fail( @@ -353,11 +359,16 @@ def test_parallel_savepoint( near_zero=case.testobj.near_zero, ) if not metric.check: + os.makedirs(OUTDIR, exist_ok=True) + log_filename = os.path.join( + OUTDIR, f"details-{case.savepoint_name}-{varname}.log" + ) + metric.report(log_filename) pytest.fail(str(metric), pytrace=False) passing_names.append(failing_names.pop()) if len(failing_names) > 0: os.makedirs(OUTDIR, exist_ok=True) - out_filename = os.path.join( + nct_filename = os.path.join( OUTDIR, f"translate-{case.savepoint_name}-{case.grid.rank}.nc" ) try: @@ -370,7 +381,7 @@ def test_parallel_savepoint( [output], ref_data, failing_names, - out_filename, + nct_filename, ) except Exception as error: print(f"TestParallel SaveNetCDF Error: {error}") diff --git a/ndsl/stencils/testing/translate.py b/ndsl/stencils/testing/translate.py index 779d1339..2584deb1 100644 --- a/ndsl/stencils/testing/translate.py +++ b/ndsl/stencils/testing/translate.py @@ -178,6 +178,8 @@ def make_storage_data_input_vars(self, inputs, storage_vars=None, dict_4d=True): for p in self.in_vars["parameters"]: if type(inputs_in[p]) in [np.int64, np.int32]: inputs_out[p] = int(inputs_in[p]) + elif type(inputs_in[p]) is bool: + inputs_out[p] == inputs_in[p] else: inputs_out[p] = Float(inputs_in[p]) for d, info in storage_vars.items(): diff --git a/ndsl/testing/comparison.py b/ndsl/testing/comparison.py index 9812e064..9e2d1d59 100644 --- a/ndsl/testing/comparison.py +++ b/ndsl/testing/comparison.py @@ -1,4 +1,4 @@ -from typing import Union +from typing import List, Optional, Union import numpy as np import numpy.typing as npt @@ -20,6 +20,9 @@ def __str__(self) -> str: def __repr__(self) -> str: ... + def report(self, file_path: Optional[str] = None) -> List[str]: + ... + class LegacyMetric(BaseMetric): """Legacy (AI2) metric used for original FV3 port. 
@@ -39,12 +42,12 @@ def __init__(
     ):
         super().__init__(reference_values, computed_values)
         self.eps = eps
+        self._calculated_metric = np.empty_like(self.references)
         self.success = self._compute_errors(
             ignore_near_zero_errors,
             near_zero,
         )
         self.check = np.all(self.success)
-        self._calculated_metric = np.empty_like(self.references)
 
     def _compute_errors(
         self,
@@ -52,9 +55,10 @@ def _compute_errors(
         near_zero,
     ) -> npt.NDArray[np.bool_]:
         if self.references.dtype in (np.float64, np.int64, np.float32, np.int32):
-            denom = np.abs(self.references) + np.abs(self.computed)
+            denom = self.references
+            denom[self.references == 0] = self.computed[self.references == 0]
             self._calculated_metric = np.asarray(
-                2.0 * np.abs(self.computed - self.references) / denom
+                np.abs((self.computed - self.references) / denom)
             )
             self._calculated_metric[denom == 0] = 0.0
         elif self.references.dtype in (np.bool_, bool):
@@ -87,67 +91,78 @@ def _compute_errors(
         )
         return success
 
-    def __str__(self) -> str:
-        return self.__repr__()
-
-    def __repr__(self) -> str:
+    def report(self, file_path: Optional[str] = None) -> List[str]:
+        report = []
         if self.check:
-            return "✅ No numerical differences"
+            report.append("✅ No numerical differences")
+        else:
+            report.append("❌ Numerical failures")
+
+            found_indices = np.logical_not(self.success).nonzero()
+            computed_failures = self.computed[found_indices]
+            reference_failures = self.references[found_indices]
+
+            # List all errors
+            bad_indices_count = len(found_indices[0])
+            # Determine worst result
+            worst_metric_err = 0.0
+            abs_errs = []
+            details = [
+                "All failures:",
+                "Index Computed Reference Absolute E Metric E",
+            ]
+            for b in range(bad_indices_count):
+                full_index = tuple([f[b] for f in found_indices])
+
+                metric_err = self._calculated_metric[full_index]
+
+                absolute_distance = abs(computed_failures[b] - reference_failures[b])
+                abs_errs.append(absolute_distance)
+
+                details.append(
+                    f"{full_index} {computed_failures[b]} "
+                    f"{reference_failures[b]} {abs_errs[-1]:.3e} {metric_err:.3e}"
+                )
-        report = []
-        report.append("❌ Numerical failures")
-
-        found_indices = np.logical_not(self.success).nonzero()
-        computed_failures = self.computed[found_indices]
-        reference_failures = self.references[found_indices]
-
-        # List all errors
-        bad_indices_count = len(found_indices[0])
-        # Determine worst result
-        worst_metric_err = 0.0
-        abs_errs = []
-        details = [
-            "All failures:",
-            "Index Computed Reference Absloute E Metric E",
-        ]
-        for b in range(bad_indices_count):
-            full_index = tuple([f[b] for f in found_indices])
-
-            metric_err = self._calculated_metric[full_index]
-
-            absolute_distance = abs(computed_failures[b] - reference_failures[b])
-            abs_errs.append(absolute_distance)
-
-            details.append(
-                f"{full_index} {computed_failures[b]} "
-                f"{reference_failures[b]} {abs_errs[-1]:.3e} {metric_err:.3e}"
-            )
+                if np.isnan(metric_err) or (abs(metric_err) > abs(worst_metric_err)):
+                    worst_metric_err = metric_err
+                    worst_full_idx = full_index
+                    worst_abs_err = abs_errs[-1]
+                    computed_worst = computed_failures[b]
+                    reference_worst = reference_failures[b]
+            # Try to quantify noisy errors
+            unique_errors = len(np.unique(np.array(abs_errs)))
+            # Summary and worst result
+            fullcount = len(self.references.flatten())
+            report.append(
+                f"Failed count: {bad_indices_count}/{fullcount} "
+                f"({round(100.0 * (bad_indices_count / fullcount), 2)}%),\n"
+                f"Worst failed index {worst_full_idx}\n"
+                f" Computed:{computed_worst}\n"
+                f" Reference: {reference_worst}\n"
+                f" Absolute diff: {worst_abs_err:.3e}\n"
+ f" Metric diff: {worst_metric_err:.3e}\n" + f" Metric threshold: {self.eps}\n" + f" Noise quantification:\n" + f" Reference dtype: {type(reference_worst)}\n" + f" Unique errors: {unique_errors}/{bad_indices_count}" ) + report.extend(details) - if np.isnan(metric_err) or (metric_err > worst_metric_err): - worst_metric_err = metric_err - worst_full_idx = full_index - worst_abs_err = abs_errs[-1] - computed_worst = computed_failures[b] - reference_worst = reference_failures[b] - # Try to quantify noisy errors - unique_errors = len(np.unique(np.array(abs_errs))) - # Summary and worst result - fullcount = len(self.references.flatten()) - report.append( - f"Failed count: {bad_indices_count}/{fullcount} " - f"({round(100.0 * (bad_indices_count / fullcount), 2)}%),\n" - f"Worst failed index {worst_full_idx}\n" - f" Computed:{computed_worst}\n" - f" Reference: {reference_worst}\n" - f" Absolute diff: {worst_abs_err:.3e}\n" - f" Metric diff: {worst_metric_err:.3e}\n" - f" Metric threshold: {self.eps}\n" - f" Noise quantification:\n" - f" Reference dtype: {type(reference_worst)}\n" - f" Unique errors: {unique_errors}/{bad_indices_count}" - ) - report.extend(details) + if file_path: + with open(file_path, "w") as fd: + fd.write("\n".join(report)) + + return report + + def __str__(self) -> str: + return self.__repr__() + def __repr__(self) -> str: + report = self.report() + if len(report) > 30: + report = report[:30] # ~10 first errors + report.append("...") return "\n".join(report) @@ -230,36 +245,47 @@ def _compute_all_metrics( f"recieved data with unexpected dtype {self.references.dtype}" ) + def report(self, file_path: Optional[str] = None) -> List[str]: + report = [] + if self.check: + report.append("✅ No numerical differences") + else: + report.append("❌ Numerical failures") + + found_indices = np.logical_not(self.success).nonzero() + # List all errors to terminal and file + bad_indices_count = len(found_indices[0]) + full_count = len(self.references.flatten()) + failures_pct = round(100.0 * (bad_indices_count / full_count), 2) + report = [ + f"All failures ({bad_indices_count}/{full_count}) ({failures_pct}%),\n", + f"Index Computed Reference " + f"Absolute E(<{self.absolute_eps:.2e}) " + f"Relative E(<{self.relative_fraction * 100:.2e}%) " + f"ULP E(<{self.ulp_threshold})", + ] + # Summary and worst result + for iBad in range(bad_indices_count): + fi = tuple([f[iBad] for f in found_indices]) + report.append( + f"{str(fi)} {self.computed[fi]:.16e} {self.references[fi]:.16e} " + f"{self.absolute_distance[fi]:.2e} {'✅' if self.absolute_distance_metric[fi] else '❌'} " + f"{self.relative_distance[fi] * 100:.2e} {'✅' if self.relative_distance_metric[fi] else '❌'} " + f"{int(self.ulp_distance[fi]):02} {'✅' if self.ulp_distance_metric[fi] else '❌'} " + ) + + if file_path: + with open(file_path, "w") as fd: + fd.write("\n".join(report)) + + return report + def __str__(self) -> str: return self.__repr__() def __repr__(self) -> str: - if self.check: - return "✅ No numerical differences" - - report = [] - report.append("❌ Numerical failures") - - found_indices = np.logical_not(self.success).nonzero() - # List all errors - bad_indices_count = len(found_indices[0]) - full_count = len(self.references.flatten()) - failures_pct = round(100.0 * (bad_indices_count / full_count), 2) - report = [ - f"All failures ({bad_indices_count}/{full_count}) ({failures_pct}%),\n", - f"Index Computed Reference " - f"Absolute E(<{self.absolute_eps:.2e}) " - f"Relative E(<{self.relative_fraction*100:.2e}%) " - f"ULP 
E(<{self.ulp_threshold})", - ] - # Summary and worst result - for iBad in range(bad_indices_count): - fi = tuple([f[iBad] for f in found_indices]) - report.append( - f"({fi[0]:02}, {fi[1]:02}, {fi[2]:02}) {self.computed[fi]:.16e} {self.references[fi]:.16e} " - f"{self.absolute_distance[fi]:.2e} {'✅' if self.absolute_distance_metric[fi] else '❌'} " - f"{self.relative_distance[fi] * 100:.2e} {'✅' if self.relative_distance_metric[fi] else '❌'} " - f"{int(self.ulp_distance[fi]):02} {'✅' if self.ulp_distance_metric[fi] else '❌'} " - ) - + report = self.report() + if len(report) > 12: + report = report[:12] # ~10 first errors + report.append("...") return "\n".join(report) diff --git a/setup.py b/setup.py index f18fe4b9..03dcbf0f 100644 --- a/setup.py +++ b/setup.py @@ -40,7 +40,7 @@ def local_pkg(name: str, relative_path: str) -> str: setup( author="NOAA/NASA", - python_requires=">=3.8", + python_requires=">=3.11", classifiers=[ "Development Status :: 2 - Pre-Alpha", "Intended Audience :: Developers", @@ -57,7 +57,7 @@ def local_pkg(name: str, relative_path: str) -> str: packages=find_namespace_packages(include=["ndsl", "ndsl.*"]), include_package_data=True, url="https://github.com/NOAA-GFDL/NDSL", - version="2024.06.00", + version="2024.09.00", zip_safe=False, entry_points={ "console_scripts": [ diff --git a/tests/grid/generate_eta_files.py b/tests/grid/generate_eta_files.py new file mode 100755 index 00000000..1fb4d5ee --- /dev/null +++ b/tests/grid/generate_eta_files.py @@ -0,0 +1,399 @@ +import numpy as np +import xarray as xr + + +""" +This notebook uses the python xarray module +to create an eta_file containing ak and bk coefficients +for km=79 and km=91. The coefficients are written out to +eta79.nc and eta91.nc netcdf files respectively + +To run this script: `python3 ./generate_eta_files.py` +""" + +# km = 79 +ak = xr.DataArray( + dims=["km1"], + attrs=dict(units="Pa", _FillValue=False), + data=np.array( + [ + 3.000000e02, + 6.467159e02, + 1.045222e03, + 1.469188e03, + 1.897829e03, + 2.325385e03, + 2.754396e03, + 3.191294e03, + 3.648332e03, + 4.135675e03, + 4.668282e03, + 5.247940e03, + 5.876271e03, + 6.554716e03, + 7.284521e03, + 8.066738e03, + 8.902188e03, + 9.791482e03, + 1.073499e04, + 1.162625e04, + 1.237212e04, + 1.299041e04, + 1.349629e04, + 1.390277e04, + 1.422098e04, + 1.446058e04, + 1.462993e04, + 1.473633e04, + 1.478617e04, + 1.478511e04, + 1.473812e04, + 1.464966e04, + 1.452370e04, + 1.436382e04, + 1.417324e04, + 1.395491e04, + 1.371148e04, + 1.344540e04, + 1.315890e04, + 1.285407e04, + 1.253280e04, + 1.219685e04, + 1.184788e04, + 1.148739e04, + 1.111682e04, + 1.073748e04, + 1.035062e04, + 9.957395e03, + 9.558875e03, + 9.156069e03, + 8.749922e03, + 8.341315e03, + 7.931065e03, + 7.519942e03, + 7.108648e03, + 6.698281e03, + 6.290007e03, + 5.884984e03, + 5.484372e03, + 5.089319e03, + 4.700960e03, + 4.320421e03, + 3.948807e03, + 3.587201e03, + 3.236666e03, + 2.898237e03, + 2.572912e03, + 2.261667e03, + 1.965424e03, + 1.685079e03, + 1.421479e03, + 1.175419e03, + 9.476516e02, + 7.388688e02, + 5.497130e02, + 3.807626e02, + 2.325417e02, + 1.054810e02, + -8.381903e-04, + 0.000000e00, + ] + ), +) +bk = xr.DataArray( + dims=["km1"], + attrs=dict(units="None", _FillValue=False), + data=np.array( + [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.00106595, + 0.00412866, + 0.00900663, + 0.01554263, + 0.02359921, + 0.03305481, + 0.0438012, + 0.05574095, + 0.06878554, + 0.08285347, + 0.09786981, + 
0.1137643, + 0.130471, + 0.1479275, + 0.1660746, + 0.1848558, + 0.2042166, + 0.2241053, + 0.2444716, + 0.2652672, + 0.286445, + 0.3079604, + 0.3297701, + 0.351832, + 0.3741062, + 0.3965532, + 0.4191364, + 0.4418194, + 0.4645682, + 0.48735, + 0.5101338, + 0.5328897, + 0.5555894, + 0.5782067, + 0.6007158, + 0.6230936, + 0.6452944, + 0.6672683, + 0.6889648, + 0.7103333, + 0.7313231, + 0.7518838, + 0.7719651, + 0.7915173, + 0.8104913, + 0.828839, + 0.846513, + 0.8634676, + 0.8796583, + 0.8950421, + 0.9095779, + 0.9232264, + 0.9359506, + 0.9477157, + 0.9584892, + 0.9682413, + 0.9769447, + 0.9845753, + 0.9911126, + 0.9965372, + 1.0, + ] + ), +) +coefficients = xr.Dataset(data_vars={"ak": ak, "bk": bk}) +coefficients.to_netcdf("eta79.nc") + + +# km = 91 +ak = xr.DataArray( + dims=["km1"], + attrs=dict(units="Pa", _FillValue=False), + data=np.array( + [ + 1.00000000e00, + 1.75000000e00, + 2.75000000e00, + 4.09999990e00, + 5.98951054e00, + 8.62932968e00, + 1.22572632e01, + 1.71510906e01, + 2.36545467e01, + 3.21627693e01, + 4.31310921e01, + 5.71100426e01, + 7.46595764e01, + 9.64470978e01, + 1.23169769e02, + 1.55601318e02, + 1.94594009e02, + 2.41047531e02, + 2.95873840e02, + 3.60046967e02, + 4.34604828e02, + 5.20628723e02, + 6.19154846e02, + 7.31296021e02, + 8.58240906e02, + 1.00106561e03, + 1.16092859e03, + 1.33903992e03, + 1.53650012e03, + 1.75448938e03, + 1.99417834e03, + 2.25667407e03, + 2.54317139e03, + 2.85476392e03, + 3.19258569e03, + 3.55775366e03, + 3.95135107e03, + 4.37428662e03, + 4.82711084e03, + 5.31022168e03, + 5.82387793e03, + 6.36904248e03, + 6.94875244e03, + 7.56691992e03, + 8.22634277e03, + 8.93120996e03, + 9.68446191e03, + 1.04822725e04, + 1.13182793e04, + 1.21840771e04, + 1.30655674e04, + 1.39532207e04, + 1.48307285e04, + 1.56872617e04, + 1.65080645e04, + 1.72810996e04, + 1.79942988e04, + 1.86363223e04, + 1.91961797e04, + 1.96640723e04, + 2.00301914e04, + 2.02853691e04, + 2.04215254e04, + 2.04300684e04, + 2.03028730e04, + 2.00323711e04, + 1.96110664e04, + 1.90313848e04, + 1.82866426e04, + 1.73777930e04, + 1.63224639e04, + 1.51444033e04, + 1.38725674e04, + 1.25404785e04, + 1.11834170e04, + 9.83532715e03, + 8.52630664e03, + 7.28224512e03, + 6.12326074e03, + 5.06350684e03, + 4.11124902e03, + 3.27000122e03, + 2.53922729e03, + 1.91530762e03, + 1.39244995e03, + 9.63134766e02, + 6.20599365e02, + 3.57989502e02, + 1.69421387e02, + 5.10314941e01, + 2.48413086e00, + 0.00000000e00, + ] + ), +) +bk = xr.DataArray( + dims=["km1"], + attrs=dict(units="None", _FillValue=False), + data=np.array( + [ + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 3.50123992e-06, + 2.81484008e-05, + 9.38666999e-05, + 2.28561999e-04, + 5.12343016e-04, + 1.04712998e-03, + 1.95625005e-03, + 3.42317997e-03, + 5.58632007e-03, + 8.65428988e-03, + 1.27844000e-02, + 1.81719996e-02, + 2.49934997e-02, + 3.34198996e-02, + 4.36249003e-02, + 
+            5.57769015e-02,
+            7.00351968e-02,
+            8.65636021e-02,
+            1.05520003e-01,
+            1.27051994e-01,
+            1.51319996e-01,
+            1.78477004e-01,
+            2.08675995e-01,
+            2.42069006e-01,
+            2.78813988e-01,
+            3.19043010e-01,
+            3.62558991e-01,
+            4.08596009e-01,
+            4.56384987e-01,
+            5.05111992e-01,
+            5.53902984e-01,
+            6.01903021e-01,
+            6.48333013e-01,
+            6.92534983e-01,
+            7.33981013e-01,
+            7.72292018e-01,
+            8.07236016e-01,
+            8.38724971e-01,
+            8.66774976e-01,
+            8.91497016e-01,
+            9.13065016e-01,
+            9.31702971e-01,
+            9.47658002e-01,
+            9.61175978e-01,
+            9.72495019e-01,
+            9.81844008e-01,
+            9.89410996e-01,
+            9.95342016e-01,
+            1.00000000e00,
+        ]
+    ),
+)
+coefficients = xr.Dataset(data_vars={"ak": ak, "bk": bk})
+coefficients.to_netcdf("eta91.nc")
+
+# km =
diff --git a/tests/grid/test_eta.py b/tests/grid/test_eta.py
new file mode 100755
index 00000000..ab0539f8
--- /dev/null
+++ b/tests/grid/test_eta.py
@@ -0,0 +1,223 @@
+#!/usr/bin/env python3
+
+import os
+
+import numpy as np
+import pytest
+import xarray as xr
+
+from ndsl import (
+    CubedSphereCommunicator,
+    CubedSpherePartitioner,
+    NullComm,
+    QuantityFactory,
+    SubtileGridSizer,
+    TilePartitioner,
+)
+from ndsl.grid import MetricTerms
+
+
+"""
+This test checks to ensure that ak and bk
+values are read in and stored properly.
+In addition, this test checks to ensure that
+the function set_hybrid_pressure_coefficients
+fails as expected if the computed eta values
+vary non-monotonically and if the eta_file
+does not exist.
+"""
+
+
+def set_answers(eta_file):
+
+    """
+    Read in the expected values of ak and bk
+    arrays from the input eta NetCDF files.
+    """
+
+    data = xr.open_dataset(eta_file)
+    return data["ak"].values, data["bk"].values
+
+
+def write_non_mono_eta_file(in_eta_file, out_eta_file):
+    """
+    Reads in file eta79.nc and alters randomly chosen ak/bk values.
+    This tests the expected failure of set_hybrid_pressure_coefficients
+    for coefficients that lead to non-monotonically increasing
+    eta values.
+    """
+
+    data = xr.open_dataset(in_eta_file)
+    data["ak"].values[10] = data["ak"].values[0]
+    data["bk"].values[20] = 0.0
+
+    data.to_netcdf(out_eta_file)
+
+
+@pytest.mark.parametrize("km", [79, 91])
+def test_set_hybrid_pressure_coefficients_correct(km):
+
+    """This test checks to see that the ak and bk arrays
+    are read in correctly and are stored as
+    expected. Both values of km=79 and km=91 are
+    tested and both tests are expected to pass
+    with the stored ak and bk values agreeing with the
+    values read in directly from the NetCDF file.
+ """ + + working_dir = str(os.getcwd()) + eta_file = f"{working_dir}/eta{km}.nc" + + backend = "numpy" + + layout = (1, 1) + + nz = km + ny = 48 + nx = 48 + nhalo = 3 + + partitioner = CubedSpherePartitioner(TilePartitioner(layout)) + + communicator = CubedSphereCommunicator(NullComm(rank=0, total_ranks=6), partitioner) + + sizer = SubtileGridSizer.from_tile_params( + nx_tile=nx, + ny_tile=ny, + nz=nz, + n_halo=nhalo, + extra_dim_lengths={}, + layout=layout, + tile_partitioner=partitioner.tile, + tile_rank=communicator.tile.rank, + ) + + quantity_factory = QuantityFactory.from_backend(sizer=sizer, backend=backend) + + metric_terms = MetricTerms( + quantity_factory=quantity_factory, communicator=communicator, eta_file=eta_file + ) + + ak_results = metric_terms.ak.data + bk_results = metric_terms.bk.data + ak_answers, bk_answers = set_answers(f"eta{km}.nc") + + if ak_answers.size != ak_results.size: + raise ValueError("Unexpected size of bk") + if bk_answers.size != bk_results.size: + raise ValueError("Unexpected size of ak") + + if not np.array_equal(ak_answers, ak_results): + raise ValueError("Unexpected value of ak") + if not np.array_equal(bk_answers, bk_results): + raise ValueError("Unexpected value of bk") + + +def test_set_hybrid_pressure_coefficients_nofile(): + + """ + This test checks to see that the program + fails when the eta_file is not specified + in the yaml configuration file. + """ + + eta_file = "NULL" + + backend = "numpy" + + layout = (1, 1) + + nz = 79 + ny = 48 + nx = 48 + nhalo = 3 + + partitioner = CubedSpherePartitioner(TilePartitioner(layout)) + + communicator = CubedSphereCommunicator(NullComm(rank=0, total_ranks=6), partitioner) + + sizer = SubtileGridSizer.from_tile_params( + nx_tile=nx, + ny_tile=ny, + nz=nz, + n_halo=nhalo, + extra_dim_lengths={}, + layout=layout, + tile_partitioner=partitioner.tile, + tile_rank=communicator.tile.rank, + ) + + quantity_factory = QuantityFactory.from_backend(sizer=sizer, backend=backend) + + try: + metric_terms = MetricTerms( + quantity_factory=quantity_factory, + communicator=communicator, + eta_file=eta_file, + ) + except Exception as error: + if str(error) == "eta file NULL does not exist": + pytest.xfail("testing eta file not correctly specified") + else: + pytest.fail(f"ERROR {error}") + + +def test_set_hybrid_pressure_coefficients_not_mono(): + + """ + This test checks to see that the program + fails when the computed eta values increase + non-monotonically. For the latter test, the + eta_file is specified in test_config_not_mono.yaml + file and the ak and bk values in the eta_file + have been changed nonsensically to result in + erronenous eta values. 
+ """ + + working_dir = str(os.getcwd()) + in_eta_file = f"{working_dir}/eta79.nc" + out_eta_file = "eta_not_mono_79.nc" + write_non_mono_eta_file(in_eta_file, out_eta_file) + eta_file = out_eta_file + + backend = "numpy" + + layout = (1, 1) + + nz = 79 + ny = 48 + nx = 48 + nhalo = 3 + + partitioner = CubedSpherePartitioner(TilePartitioner(layout)) + + communicator = CubedSphereCommunicator(NullComm(rank=0, total_ranks=6), partitioner) + + sizer = SubtileGridSizer.from_tile_params( + nx_tile=nx, + ny_tile=ny, + nz=nz, + n_halo=nhalo, + extra_dim_lengths={}, + layout=layout, + tile_partitioner=partitioner.tile, + tile_rank=communicator.tile.rank, + ) + + quantity_factory = QuantityFactory.from_backend(sizer=sizer, backend=backend) + + try: + metric_terms = MetricTerms( + quantity_factory=quantity_factory, + communicator=communicator, + eta_file=eta_file, + ) + except Exception as error: + if os.path.isfile(out_eta_file): + os.remove(out_eta_file) + if str(error) == "ETA values are not monotonically increasing": + pytest.xfail("testing eta values are not monotomincally increasing") + else: + pytest.fail( + "ERROR in testing eta values not are not monotonically increasing" + )