diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index 24db7fe8..feb203e8 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -24,7 +24,7 @@ jobs: shell: bash -el {0} strategy: matrix: - python-version: ["3.9", "3.10"] + python-version: ["3.9", "3.10", "3.12"] steps: - uses: actions/checkout@v3 diff --git a/demos/premade_run_directories/common_files/MOM_input b/demos/premade_run_directories/common_files/MOM_input index b1beab7c..8b4ccc70 100755 --- a/demos/premade_run_directories/common_files/MOM_input +++ b/demos/premade_run_directories/common_files/MOM_input @@ -72,6 +72,7 @@ TOPO_CONFIG = "file" ! ! Phillips - ACC-like idealized topography used in the Phillips config. ! dense - Denmark Strait-like dense water formation and overflow. ! USER - call a user modified routine. +TOPO_FILE = "bathymetry.nc" ! Name of the file containing bathymetry information. MINIMUM_DEPTH = 4.5 ! [m] default = 0.0 ! If MASKING_DEPTH is unspecified, then anything shallower than MINIMUM_DEPTH is ! assumed to be land and all fluxes are masked out. If MASKING_DEPTH is diff --git a/demos/reanalysis-forced.ipynb b/demos/reanalysis-forced.ipynb index 82607591..72f56527 100644 --- a/demos/reanalysis-forced.ipynb +++ b/demos/reanalysis-forced.ipynb @@ -288,16 +288,14 @@ " glorys_path / \"ic_unprocessed.nc\", # directory where the unprocessed initial condition is stored, as defined earlier\n", " ocean_varnames,\n", " arakawa_grid=\"A\"\n", - " )\n", + " ) \n", "\n", - "# Now iterate through our four boundaries \n", - "for i, orientation in enumerate([\"south\", \"north\", \"west\", \"east\"]):\n", - " expt.rectangular_boundary(\n", - " glorys_path / (orientation + \"_unprocessed.nc\"),\n", + "# Set up the four boundary conditions. Remember that in the glorys_path, we have four boundary files named north_unprocessed.nc, etc. \n", + "expt.rectangular_boundaries(\n", + " glorys_path,\n", " ocean_varnames,\n", - " orientation, # Needs to know the cardinal direction of the boundary\n", - " i + 1, # Just a number to identify the boundary. Indexes from 1 \n", - " arakawa_grid=\"A\"\n", + " boundaries = [\"south\", \"north\", \"west\", \"east\"],\n", + " arakawa_grid = \"A\"\n", " )" ] }, diff --git a/environment-ci.yml b/environment-ci.yml index 4bc4c43f..a16cf6f7 100644 --- a/environment-ci.yml +++ b/environment-ci.yml @@ -3,3 +3,4 @@ channels: - conda-forge dependencies: - esmpy + - numpy<2.0.0 diff --git a/pyproject.toml b/pyproject.toml index e427db43..dacad296 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -12,7 +12,7 @@ dependencies = [ "dask[array]", "dask[distributed]", "netCDF4", - "numpy >= 1.17.0", + "numpy >= 1.17.0, < 2.0.0", "scipy >= 1.2.0", "xarray", "xesmf >= 0.8.4", diff --git a/regional_mom6/regional_mom6.py b/regional_mom6/regional_mom6.py index ed7ee433..26fa71be 100644 --- a/regional_mom6/regional_mom6.py +++ b/regional_mom6/regional_mom6.py @@ -437,6 +437,7 @@ def __init__( self.grid_type = grid_type self.repeat_year_forcing = repeat_year_forcing self.ocean_mask = None + self.layout = None # This should be a tuple. Leaving a dummy 'None' makes it easy to remind the user to provide a value later on.
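+        # e.g. layout = (10, 10) decomposes the domain across 10 x 10 = 100 processors; setup_run_directory falls back on this when no mask table is found.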
if read_existing_grids: try: self.hgrid = xr.open_dataset(self.mom_input_dir / "hgrid.nc") @@ -567,14 +568,18 @@ def _make_vgrid(self): return vcoord def initial_condition( - self, ic_path, varnames, arakawa_grid="A", vcoord_type="height" + self, + raw_ic_path, + varnames, + arakawa_grid="A", + vcoord_type="height", ): """ - Reads the initial condition from files in ``ic_path``, interpolates to the + Reads the initial condition from files in ``raw_ic_path``, interpolates to the model grid, fixes up metadata, and saves back to the input directory. Args: - ic_path (Union[str, Path]): Path to initial condition file. + raw_ic_path (Union[str, Path]): Path to raw initial condition file to read in. varnames (Dict[str, str]): Mapping from MOM6 variable/coordinate names to the names in the input dataset. For example, ``{'xq': 'lonq', 'yh': 'lath', 'salt': 'so', ...}``. arakawa_grid (Optional[str]): Arakawa grid staggering type of the initial condition. @@ -586,7 +591,7 @@ def initial_condition( # Remove time dimension if present in the IC. # Assume that the first time dim is the intended one if more than one is present - ic_raw = xr.open_dataset(ic_path) + ic_raw = xr.open_dataset(raw_ic_path) if varnames["time"] in ic_raw.dims: ic_raw = ic_raw.isel({varnames["time"]: 0}) if varnames["time"] in ic_raw.coords: @@ -616,6 +621,12 @@ def initial_condition( "Error in reading in initial condition tracers. Terminating!" ) + ## if min(temperature) > 100 then assume that units must be degrees K + ## (otherwise we can't be on Earth) and convert to degrees C + if np.nanmin(ic_raw[varnames["tracers"]["temp"]]) > 100: + ic_raw[varnames["tracers"]["temp"]] -= 273.15 + ic_raw[varnames["tracers"]["temp"]].attrs["units"] = "degrees Celsius" + # Rename all coordinates to have 'lon' and 'lat' to work with the xesmf regridder if arakawa_grid == "A": if ( @@ -641,6 +652,7 @@ def initial_condition( + "in the varnames dictionary. For example, {'x': 'lon', 'y': 'lat'}.\n\n" + "Terminating!" ) + if arakawa_grid == "B": if ( "xq" in varnames.keys() @@ -695,6 +707,7 @@ def initial_condition( + "in the varnames dictionary. For example, {'xh': 'lonh', 'yh': 'lath', ...}.\n\n" + "Terminating!" ) + ## Construct the xq, yh and xh, yq grids ugrid = ( self.hgrid[["x", "y"]] @@ -723,9 +736,10 @@ def initial_condition( } ) - ### Drop NaNs to be re-added later # NaNs might be here from the land mask of the model that the IC has come from. # If they're not removed then the coastlines from this other grid will be retained! + # The land mask comes from the bathymetry file, so we don't need NaNs + # to tell MOM6 where the land is. ic_raw_tracers = ( ic_raw_tracers.interpolate_na("lon", method="linear") .ffill("lon") @@ -761,7 +775,7 @@ def initial_condition( .bfill("lat") ) - ## Make our three horizontal regrideers + ## Make our three horizontal regridders regridder_u = xe.Regridder( ic_raw_u, ugrid, @@ -780,8 +794,11 @@ def initial_condition( ) print("INITIAL CONDITIONS") + ## Regrid all fields horizontally. - print("Regridding Velocities...", end="") + + print("Regridding Velocities... ", end="") + vel_out = xr.merge( [ regridder_u(ic_raw_u) @@ -792,18 +809,22 @@ def initial_condition( .rename("v"), ] ) - print("Done.\nRegridding Tracers...") + + print("Done.\nRegridding Tracers... ", end="") + tracers_out = xr.merge( [ regridder_t(ic_raw_tracers[varnames["tracers"][i]]).rename(i) for i in varnames["tracers"] ] ).rename({"lon": "xh", "lat": "yh", varnames["zl"]: "zl"}) - print("Done.\nRegridding Free surface...") + + print("Done.\nRegridding Free surface... 
", end="") eta_out = ( regridder_t(ic_raw_eta).rename({"lon": "xh", "lat": "yh"}).rename("eta_t") ) ## eta_t is the name set in MOM_input by default + print("Done.") ## Return attributes to arrays @@ -825,11 +846,6 @@ def initial_condition( eta_out.yh.attrs = ic_raw_tracers.lat.attrs eta_out.attrs = ic_raw_eta.attrs - ## if min(temp) > 100 then assume that units must be degrees K - ## (otherwise we can't be on Earth) and convert to degrees C - if np.min(tracers_out["temp"].isel({"zl": 0})) > 100: - tracers_out["temp"] -= 273.15 - ## Regrid the fields vertically if ( @@ -874,19 +890,67 @@ def initial_condition( "eta_t": {"_FillValue": None}, }, ) - print("done setting up initial condition.") self.ic_eta = eta_out self.ic_tracers = tracers_out self.ic_vels = vel_out + + print("done setting up initial condition.") + return - def rectangular_boundary( + def rectangular_boundaries( + self, + raw_boundaries_path, + varnames, + boundaries=["south", "north", "west", "east"], + arakawa_grid="A", + ): + """ + This function is a wrapper for `simple_boundary`. Given a list of up to four cardinal directions, + it creates a boundary forcing file for each one. Ensure that the raw boundaries are all saved in the same directory, + and that they are named using the format `east_unprocessed.nc` + + Args: + raw_boundaries_path (str): Path to the directory containing the raw boundary forcing files. + varnames (Dict[str, str]): Mapping from MOM6 variable/coordinate names to the name in the + input dataset. + boundaries (List[str]): List of cardinal directions for which to create boundary forcing files. + Default is `["south", "north", "west", "east"]`. + arakawa_grid (Optional[str]): Arakawa grid staggering type of the boundary forcing. + Either ``'A'`` (default), ``'B'``, or ``'C'``. + """ + for i in boundaries: + if i not in ["south", "north", "west", "east"]: + raise ValueError( + f"Invalid boundary direction: {i}. Must be one of ['south', 'north', 'west', 'east']" + ) + + if len(boundaries) < 4: + print( + "NOTE: the 'setup_run_directories' method assumes that you have four boundaries. You'll need to modify the MOM_input file manually to reflect the number of boundaries you have, and their orientations. You should be able to find the relevant section in the MOM_input file by searching for 'segment_'. Ensure that the segment names match those in your inputdir/forcing folder" + ) + + if len(boundaries) > 4: + raise ValueError( + "This method only supports up to four boundaries. To set up more complex boundary shapes you can manually call the 'simple_boundary' method for each boundary." + ) + # Now iterate through our four boundaries + for i, orientation in enumerate(boundaries, start=1): + self.simple_boundary( + Path(raw_boundaries_path) / (orientation + "_unprocessed.nc"), + varnames, + orientation, # The cardinal direction of the boundary + i, # A number to identify the boundary; indexes from 1 + arakawa_grid=arakawa_grid, + ) + + def simple_boundary( self, path_to_bc, varnames, orientation, segment_number, arakawa_grid="A" ): """ - Set up a boundary forcing file for a given orientation. Here the term 'rectangular' - means boundaries along lines of constant latitude or longitude. + Here 'simple' refers to boundaries that are parallel to lines of constant longitude or latitude. + Set up a boundary forcing file for a given orientation. Args: path_to_bc (str): Path to boundary forcing file. 
Ideally this should be a pre cut-out @@ -904,7 +968,10 @@ def rectangular_boundary( """ print("Processing {} boundary...".format(orientation), end="") - + if not path_to_bc.exists(): + raise FileNotFoundError( + f"Boundary file not found at {path_to_bc}. Please ensure that the files are named in the format `east_unprocessed.nc`." + ) seg = segment( hgrid=self.hgrid, infile=path_to_bc, # location of raw boundary @@ -1414,6 +1481,21 @@ def setup_run_directory( premade_rundir_path = Path( importlib.resources.files("regional_mom6") / "demos/premade_run_directories" ) + if not premade_rundir_path.exists(): + print("Could not find premade run directories at ", premade_rundir_path) + print( + "Perhaps the package was imported directly rather than installed with conda. Checking if this is the case... " + ) + + premade_rundir_path = Path( + importlib.resources.files("regional_mom6").parent + / "demos/premade_run_directories" + ) + if not premade_rundir_path.exists(): + raise ValueError( + f"Cannot find the premade run directory files at {premade_rundir_path} either.\n\n" + + "There may be an issue with package installation. Check that the `premade_run_directories` folder is present in one of these two locations." + ) # Define the locations of the directories we'll copy files across from. Base contains most of the files, and overwrite replaces files in the base directory. base_run_dir = premade_rundir_path / "common_files" @@ -1479,18 +1561,32 @@ def setup_run_directory( mask_table = p.name x, y = (int(v) for v in layout.split("x")) ncpus = (x * y) - int(masked) - if mask_table == None: + layout = ( + x, + y, + ) # This is a local variable keeping track of the layout as read from the mask table. Not to be confused with self.layout which is unchanged and may differ. + print( - "No mask table found! This suggests the domain is mostly water, so there are " - + "no `non compute` cells that are entirely land. If this doesn't seem right, " - + "ensure you've already run `FRE_tools`." + f"Mask table {p.name} read. Using this to infer the CPU layout {layout}, the total number of masked cells {masked}, and the total number of CPUs {ncpus}." ) - if not hasattr(self, "layout"): + + if mask_table is None: + if self.layout is None: raise AttributeError( - "No layout information found. This suggests that `FRE_tools()` hasn't been called yet. " - + "Please do so, in order for the number of processors required is computed." + "No mask table found, and the CPU layout has not been set. At least one of these is required to set up the experiment." ) - ncpus = self.layout[0] * self.layout[1] + print( + f"No mask table found, but the CPU layout has been set to {self.layout}. This suggests the domain is mostly water, so there are " + + "no `non compute` cells that are entirely land. If this doesn't seem right, " + + "ensure you've already run the `FRE_tools` method which sets up the CPU mask table. Keep an eye on any errors that might print while " + + "the FRE tools (which run C++ in the background) are running." + ) + # Here we define a local copy of the layout just for use within this function. + # This prevents the layout from being overwritten in the main class in case + # the user accidentally loads in the wrong mask table. 
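+            # (assuming the user has set a layout beforehand, e.g. expt.layout = (10, 10); otherwise the AttributeError above is raised)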
+ layout = self.layout + ncpus = layout[0] * layout[1] + print("Number of CPUs required: ", ncpus) ## Modify the input namelists to give the correct layouts @@ -1504,7 +1600,7 @@ def setup_run_directory( else: lines[jj] = "# MASKTABLE = no mask table" if "LAYOUT =" in lines[jj] and "IO" not in lines[jj]: - lines[jj] = f"LAYOUT = {self.layout[1]},{self.layout[0]}\n" + lines[jj] = f"LAYOUT = {layout[1]},{layout[0]}\n" if "NIGLOBAL" in lines[jj]: lines[jj] = f"NIGLOBAL = {self.hgrid.nx.shape[0]//2}\n" @@ -1572,85 +1668,86 @@ def setup_era5(self, era5_path): years = [ i for i in range(self.date_range[0].year, self.date_range[1].year + 1) ] - # Loop through each year and read the corresponding files - for year in years: - ds = xr.open_mfdataset( - f"{era5_path}/{fname}/{year}/{fname}*", - decode_times=False, - chunks={"longitude": 100, "latitude": 100}, - ) + # Construct a list of all paths for all years to use for open_mfdataset + paths_per_year = [Path(f"{era5_path}/{fname}/{year}/") for year in years] + all_files = [] + for path in paths_per_year: + # Use glob to find all files that match the pattern + files = list(path.glob(f"{fname}*.nc")) + # Add the files to the all_files list + all_files.extend(files) + + ds = xr.open_mfdataset( + all_files, + decode_times=False, + chunks={"longitude": 100, "latitude": 100}, + ) - ## Cut out this variable to our domain size - rawdata[fname] = longitude_slicer( - ds, - self.longitude_extent, - "longitude", - ).sel( - latitude=slice( - self.longitude_extent[1], self.longitude_extent[0] - ) ## This is because ERA5 has latitude in decreasing order (??) - ) + ## Cut out this variable to our domain size + rawdata[fname] = longitude_slicer( + ds, + self.longitude_extent, + "longitude", + ).sel( + latitude=slice( + self.latitude_extent[1], self.latitude_extent[0] + ) ## ERA5 stores latitude in decreasing order, so the slice bounds are reversed 
+ ) - ## Now fix up the latitude and time dimensions + ## Now fix up the latitude and time dimensions - rawdata[fname] = ( - rawdata[fname] - .isel(latitude=slice(None, None, -1)) ## Flip latitude - .assign_coords( - time=np.arange( - 0, rawdata[fname].time.shape[0], dtype=float - ) ## Set the zero date of forcing to start of run - ) + rawdata[fname] = ( + rawdata[fname] + .isel(latitude=slice(None, None, -1)) ## Flip latitude + .assign_coords( + time=np.arange( + 0, rawdata[fname].time.shape[0], dtype=float ) ## Set the zero date of forcing to start of run ) + ) - rawdata[fname].time.attrs = { - "calendar": "julian", - "units": f"hours since {self.date_range[0].strftime('%Y-%m-%d %H:%M:%S')}", - } ## Fix up calendar to match - - if fname == "2d": - ## Calculate specific humidity from dewpoint temperature - dewpoint = 8.07131 - 1730.63 / ( - 233.426 + rawdata["2d"]["d2m"] - 273.15 - ) - humidity = ( - (0.622 / rawdata["sp"]["sp"]) * (10**dewpoint) * 101325 / 760 - ) - q = xr.Dataset(data_vars={"q": humidity}) - - q.q.attrs = {"long_name": "Specific Humidity", "units": "kg/kg"} - q.to_netcdf( - f"{self.mom_input_dir}/forcing/q_ERA5.nc", - unlimited_dims="time", - encoding={"q": {"dtype": "double"}}, - ) - elif fname == "crr": - ## Calculate total rain rate from convective and total - trr = xr.Dataset( - data_vars={ - "trr": rawdata["crr"]["crr"] + rawdata["lsrr"]["lsrr"] - } - ) + rawdata[fname].time.attrs = { + "calendar": "julian", + "units": f"hours since {self.date_range[0].strftime('%Y-%m-%d %H:%M:%S')}", + } ## Fix up the calendar and units to match the run start date + + if fname == "2d": + ## Calculate specific humidity from dewpoint temperature + dewpoint = 8.07131 - 1730.63 / (233.426 + rawdata["2d"]["d2m"] - 273.15) + humidity = (0.622 / rawdata["sp"]["sp"]) * (10**dewpoint) * 101325 / 760 + q = xr.Dataset(data_vars={"q": humidity}) + + q.q.attrs = {"long_name": "Specific Humidity", "units": "kg/kg"} + q.to_netcdf( + f"{self.mom_input_dir}/forcing/q_ERA5.nc", + unlimited_dims="time", + encoding={"q": {"dtype": "double"}}, + ) + elif fname == "crr": + ## Calculate total rain rate as the sum of the convective and large-scale rain rates + trr = xr.Dataset( + data_vars={"trr": rawdata["crr"]["crr"] + rawdata["lsrr"]["lsrr"]} + ) - trr.trr.attrs = { - "long_name": "Total Rain Rate", - "units": "kg m**-2 s**-1", - } - trr.to_netcdf( - f"{self.mom_input_dir}/forcing/trr_ERA5-{year}.nc", - unlimited_dims="time", - encoding={"trr": {"dtype": "double"}}, - ) + trr.trr.attrs = { + "long_name": "Total Rain Rate", + "units": "kg m**-2 s**-1", + } + trr.to_netcdf( + f"{self.mom_input_dir}/forcing/trr_ERA5.nc", + unlimited_dims="time", + encoding={"trr": {"dtype": "double"}}, + ) - elif fname == "lsrr": - ## This is handled by crr as both are added together to calculate total rain rate. - pass - else: - rawdata[fname].to_netcdf( - f"{self.mom_input_dir}/forcing/{fname}_ERA5-{year}.nc", - unlimited_dims="time", - encoding={vname: {"dtype": "double"}}, - ) + elif fname == "lsrr": + ## This is handled in the crr branch, where both are added together to calculate the total rain rate. 
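+                # (no separate lsrr_ERA5.nc is written; the large-scale contribution is already included in trr_ERA5.nc)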
+ pass + else: + rawdata[fname].to_netcdf( + f"{self.mom_input_dir}/forcing/{fname}_ERA5.nc", + unlimited_dims="time", + encoding={vname: {"dtype": "double"}}, + ) class segment: @@ -1909,10 +2006,11 @@ def rectangular_brushcut(self): del segment_out["lat"] ## Convert temperatures to celsius # use pint if ( - np.min(segment_out[self.tracers["temp"]].isel({self.time: 0, self.z: 0})) + np.nanmin(segment_out[self.tracers["temp"]].isel({self.time: 0, self.z: 0})) > 100 ): segment_out[self.tracers["temp"]] -= 273.15 + segment_out[self.tracers["temp"]].attrs["units"] = "degrees Celsius" # fill in NaNs segment_out = ( diff --git a/tests/test_expt_class.py b/tests/test_expt_class.py index d61f2afe..d0713c2f 100644 --- a/tests/test_expt_class.py +++ b/tests/test_expt_class.py @@ -100,6 +100,97 @@ def test_setup_bathymetry( bathymetry_file.unlink() +def number_of_gridpoints(longitude_extent, latitude_extent, resolution): + nx = int((longitude_extent[-1] - longitude_extent[0]) / resolution) + ny = int((latitude_extent[-1] - latitude_extent[0]) / resolution) + + return nx, ny + + +def generate_temperature_arrays(nx, ny, number_vertical_layers): + + # temperatures close to 0 ᵒC + temp_in_C = np.random.randn(ny, nx, number_vertical_layers) + + temp_in_C_masked = np.copy(temp_in_C) + if int(ny / 3 + 5) < ny - 1 and int(nx / 4 + 4) < nx - 1: + temp_in_C_masked[ + int(ny / 3) : int(ny / 3 + 5), int(nx / 4) : int(nx / 4 + 4), : + ] = float("nan") + else: + raise ValueError("use bigger domain") + + temp_in_K = np.copy(temp_in_C) + 273.15 + temp_in_K_masked = np.copy(temp_in_C_masked) + 273.15 + + # ensure we didn't mask the minimum temperature + if np.nanmin(temp_in_C_masked) == np.min(temp_in_C): + return temp_in_C, temp_in_C_masked, temp_in_K, temp_in_K_masked + else: + return generate_temperature_arrays(nx, ny, number_vertical_layers) + + +def generate_silly_coords( + longitude_extent, latitude_extent, resolution, depth, number_vertical_layers +): + nx, ny = number_of_gridpoints(longitude_extent, latitude_extent, resolution) + + horizontal_buffer = 5 + + silly_lat = np.linspace( + latitude_extent[0] - horizontal_buffer, + latitude_extent[1] + horizontal_buffer, + ny, + ) + silly_lon = np.linspace( + longitude_extent[0] - horizontal_buffer, + longitude_extent[1] + horizontal_buffer, + nx, + ) + silly_depth = np.linspace(0, depth, number_vertical_layers) + + return silly_lat, silly_lon, silly_depth + + +longitude_extent = [-5, 3] +latitude_extent = (0, 10) +date_range = ["2003-01-01 00:00:00", "2003-01-01 00:00:00"] +resolution = 0.1 +number_vertical_layers = 5 +layer_thickness_ratio = 1 +depth = 1000 + +silly_lat, silly_lon, silly_depth = generate_silly_coords( + longitude_extent, latitude_extent, resolution, depth, number_vertical_layers +) + +dims = ["silly_lat", "silly_lon", "silly_depth"] + +coords = {"silly_lat": silly_lat, "silly_lon": silly_lon, "silly_depth": silly_depth} + +mom_run_dir = "rundir/" +mom_input_dir = "inputdir/" +toolpath_dir = "toolpath" +grid_type = "even_spacing" + +nx, ny = number_of_gridpoints(longitude_extent, latitude_extent, resolution) + +temp_in_C, temp_in_C_masked, temp_in_K, temp_in_K_masked = generate_temperature_arrays( + nx, ny, number_vertical_layers +) + +temp_C = xr.DataArray(temp_in_C, dims=dims, coords=coords) +temp_K = xr.DataArray(temp_in_K, dims=dims, coords=coords) +temp_C_masked = xr.DataArray(temp_in_C_masked, dims=dims, coords=coords) +temp_K_masked = xr.DataArray(temp_in_K_masked, dims=dims, coords=coords) + +maximum_temperature_in_C = np.max(temp_in_C) + + 
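+# The four temperature variants above (Celsius and Kelvin, each with and without a NaN mask) are parametrized below to exercise the Kelvin-detection heuristic in initial_condition(): np.nanmin(temp_in_K) is near 273, which exceeds the 100 threshold and triggers conversion to Celsius, while the Celsius arrays are left unchanged.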
+@pytest.mark.parametrize( + "temp_dataarray_initial_condition", + [temp_C, temp_C_masked, temp_K, temp_K_masked], +) @pytest.mark.parametrize( ( "longitude_extent", "latitude_extent", "date_range", "resolution", "number_vertical_layers", "layer_thickness_ratio", "depth", "mom_run_dir", "mom_input_dir", "toolpath_dir", "grid_type", ), @@ -116,13 +207,13 @@ def test_setup_bathymetry( ), [ ( - [-5, 5], - (0, 10), - ["2003-01-01 00:00:00", "2003-01-01 00:00:00"], - 0.1, - 5, - 1, - 1000, + longitude_extent, + latitude_extent, + date_range, + resolution, + number_vertical_layers, + layer_thickness_ratio, + depth, "rundir/", "inputdir/", "toolpath", @@ -142,8 +233,22 @@ def test_ocean_forcing( mom_input_dir, toolpath_dir, grid_type, + temp_dataarray_initial_condition, tmp_path, ): + + silly_lat, silly_lon, silly_depth = generate_silly_coords( + longitude_extent, latitude_extent, resolution, depth, number_vertical_layers + ) + + dims = ["silly_lat", "silly_lon", "silly_depth"] + + coords = { + "silly_lat": silly_lat, + "silly_lon": silly_lon, + "silly_depth": silly_depth, + } + expt = experiment( longitude_extent=longitude_extent, latitude_extent=latitude_extent, @@ -160,72 +265,34 @@ def test_ocean_forcing( ## Generate some initial condition to test on + nx, ny = number_of_gridpoints(longitude_extent, latitude_extent, resolution) + # initial condition includes temp, salt, eta, u, v initial_cond = xr.Dataset( { - "temp": xr.DataArray( - np.random.random((100, 100, 10)), - dims=["silly_lat", "silly_lon", "silly_depth"], - coords={ - "silly_lat": np.linspace( - latitude_extent[0] - 5, latitude_extent[1] + 5, 100 - ), - "silly_lon": np.linspace( - longitude_extent[0] - 5, longitude_extent[1] + 5, 100 - ), - "silly_depth": np.linspace(0, 1000, 10), - }, - ), "eta": xr.DataArray( - np.random.random((100, 100)), + np.random.random((ny, nx)), dims=["silly_lat", "silly_lon"], coords={ - "silly_lat": np.linspace( - latitude_extent[0] - 5, latitude_extent[1] + 5, 100 - ), - "silly_lon": np.linspace( - longitude_extent[0] - 5, longitude_extent[1] + 5, 100 - ), + "silly_lat": silly_lat, + "silly_lon": silly_lon, }, ), + "temp": temp_dataarray_initial_condition, "salt": xr.DataArray( - np.random.random((100, 100, 10)), - dims=["silly_lat", "silly_lon", "silly_depth"], - coords={ - "silly_lat": np.linspace( - latitude_extent[0] - 5, latitude_extent[1] + 5, 100 - ), - "silly_lon": np.linspace( - longitude_extent[0] - 5, longitude_extent[1] + 5, 100 - ), - "silly_depth": np.linspace(0, 1000, 10), - }, + np.random.random((ny, nx, number_vertical_layers)), + dims=dims, + coords=coords, ), "u": xr.DataArray( - np.random.random((100, 100, 10)), - dims=["silly_lat", "silly_lon", "silly_depth"], - coords={ - "silly_lat": np.linspace( - latitude_extent[0] - 5, latitude_extent[1] + 5, 100 - ), - "silly_lon": np.linspace( - longitude_extent[0] - 5, longitude_extent[1] + 5, 100 - ), - "silly_depth": np.linspace(0, 1000, 10), - }, + np.random.random((ny, nx, number_vertical_layers)), + dims=dims, + coords=coords, ), "v": xr.DataArray( - np.random.random((100, 100, 10)), - dims=["silly_lat", "silly_lon", "silly_depth"], - coords={ - "silly_lat": np.linspace( - latitude_extent[0] - 5, latitude_extent[1] + 5, 100 - ), - "silly_lon": np.linspace( - longitude_extent[0] - 5, longitude_extent[1] + 5, 100 - ), - "silly_depth": np.linspace(0, 1000, 10), - }, + np.random.random((ny, nx, number_vertical_layers)), + dims=dims, + coords=coords, ), } ) @@ -251,6 +318,12 @@ def test_ocean_forcing( arakawa_grid="A", ) + # ensure that temperature is in degrees C + assert np.nanmin(expt.ic_tracers["temp"]) < 100.0 + + # max(temp) can be less than maximum_temperature_in_C due to re-gridding + assert 
np.nanmax(expt.ic_tracers["temp"]) <= maximum_temperature_in_C + @pytest.mark.parametrize( ( @@ -282,7 +355,7 @@ def test_ocean_forcing( ), ], ) -def test_rectangular_boundary( +def test_rectangular_boundaries( longitude_extent, latitude_extent, date_range, @@ -370,7 +443,7 @@ def test_rectangular_boundary( ), } ) - eastern_boundary.to_netcdf(tmp_path / "east_unprocessed") + eastern_boundary.to_netcdf(tmp_path / "east_unprocessed.nc") eastern_boundary.close() expt = experiment( @@ -398,4 +471,4 @@ def test_rectangular_boundary( "tracers": {"temp": "temp", "salt": "salt"}, } - expt.rectangular_boundary(tmp_path / "east_unprocessed", varnames, "east", 1) + expt.rectangular_boundaries(tmp_path, varnames, ["east"])
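+    # (rectangular_boundaries defaults to all four cardinal directions; a subset such as ["east"] is also accepted)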