diff --git a/config_amwg_default_plots.yaml b/config_amwg_default_plots.yaml index 2f9ac5d06..4071e6761 100644 --- a/config_amwg_default_plots.yaml +++ b/config_amwg_default_plots.yaml @@ -64,10 +64,10 @@ user: 'USER-NAME-NOT-SET' diag_basic_info: #History file string to match (eg. cam.h0 or ocn.pop.h.ecosys.nday1) - # Only affects timeseries as everything else uses timeseries + # Only affects timeseries as everything else uses timeseries # Leave off trailing '.' #Default: cam.h0 - hist_str: cam.h0 + hist_str: cam.h0 #Is this a model vs observations comparison? #If "false" or missing, then a model-model comparison is assumed: @@ -96,14 +96,10 @@ diag_basic_info: #Location where diagnostic plots are stored: cam_diag_plot_loc: /glade/scratch/${user}/ADF/plots - #Use default variable plot settings? - #If "true", then variable-specific plotting attributes as defined in - #ADF/lib/adf_variable_defaults.yaml will be used: - use_defaults: true - - #Location of ADF variable plotting defaults YAML file - #if not using the one in ADF/lib: - # defaults_file: /some/path/to/defaults/file + #Location of ADF variable plotting defaults YAML file: + #If left blank or missing, ADF/lib/adf_variable_defaults.yaml will be used + #Uncomment and change path for custom variable defaults file + #defaults_file: /some/path/to/defaults/file.yaml #Vertical pressure levels (in hPa) on which to plot 3-D variables #when using horizontal (e.g. lat/lon) map projections. @@ -281,6 +277,7 @@ diag_cvdp_info: #These scripts must be located in "scripts/averaging": time_averaging_scripts: - create_climo_files + #- create_TEM_files #To generate TEM files, please un-comment #Name of regridding scripts being used. #These scripts must be located in "scripts/regridding": @@ -300,6 +297,9 @@ plotting_scripts: - zonal_mean - polar_map - cam_taylor_diagram + #- tem #To plot TEM, please un-comment + #- regional_map_multicase #To use this please un-comment and fill-out + #the "region_multicase" section below #List of CAM variables that will be processesd: #If CVDP is to be run PSL, TREFHT, TS and PRECT (or PRECC and PRECL) should be listed @@ -347,4 +347,37 @@ diag_var_list: # +# Options for multi-case regional contour plots (./plotting/regional_map_multicase.py) +# region_multicase: +# region_spec: [slat, nlat, wlon, elon] +# region_time_option: # If calendar, will look for specified years. If zeroanchor will use a nyears starting from year_offset from the beginning of timeseries +# region_start_year: +# region_end_year: +# region_nyear: +# region_year_offset: +# region_month: +# region_season: +# region_variables: + +# Options for TEM diagnostics (./averaging/create_TEM_files.py and ./plotting/temp.py) +#tem_info: + #Location where TEM files are stored: + #If path not specified or commented out, TEM calculation/plots will be skipped +# tem_loc: /glade/scratch/richling/adf-output/ADF-data/TEM/ + + #TEM history file number + #If missing or blank, ADF will default to h4 +# hist_num: h4 + + #Overwrite TEM files, if found? 
+ #If set to false, then TEM creation will be skipped if files are found: +# overwrite_tem_case: false + + #For multi-case + #overwrite_tem_case: + # - false + # - true + +# overwrite_tem_base: false + #END OF FILE diff --git a/config_cam_baseline_example.yaml b/config_cam_baseline_example.yaml index 8fa9e39bc..5da1c0236 100644 --- a/config_cam_baseline_example.yaml +++ b/config_cam_baseline_example.yaml @@ -97,14 +97,10 @@ diag_basic_info: #Location where diagnostic plots are stored: cam_diag_plot_loc: /glade/scratch/${user}/ADF/plots - #Use default variable plot settings? - #If "true", then variable-specific plotting attributes as defined in - #ADF/lib/adf_variable_defaults.yaml will be used: - use_defaults: true - - #Location of ADF variable plotting defaults YAML file - #if not using the one in ADF/lib: - # defaults_file: /some/path/to/defaults/file + #Location of ADF variable plotting defaults YAML file: + #If left blank or missing, ADF/lib/adf_variable_defaults.yaml will be used + #Uncomment and change path for custom variable defaults file + #defaults_file: /some/path/to/defaults/file.yaml #Vertical pressure levels (in hPa) on which to plot 3-D variables #when using horizontal (e.g. lat/lon) map projections. @@ -131,7 +127,6 @@ diag_basic_info: #If set to false, then if a plot is found it will be skipped: redo_plot: false - #This second set of variables provides info for the CAM simulation(s) being diagnosed: diag_cam_climo: @@ -223,8 +218,8 @@ diag_cam_climo: # - /glade/p/cesm/ADF/b1850.f19_g17.validation_mct.004 #cam_climo_loc: - # -/some/where/you/want/to/have/climo_files/ #MUST EDIT! - # -/the/same/or/some/other/climo/files/location + # - /some/where/you/want/to/have/climo_files/ #MUST EDIT! + # - /the/same/or/some/other/climo/files/location #start_year: # - 1990 @@ -247,7 +242,7 @@ diag_cam_climo: # - false #cam_ts_loc: - # - /some/where/you/want/to/have/time_series_files/ + # - /some/where/you/want/to/have/time_series_files # - /same/or/different/place/you/want/files #---------------------- @@ -346,6 +341,7 @@ diag_cvdp_info: #These scripts must be located in "scripts/averaging": time_averaging_scripts: - create_climo_files + #- create_TEM_files #To generate TEM files, please un-comment #Name of regridding scripts being used. #These scripts must be located in "scripts/regridding": @@ -367,6 +363,7 @@ plotting_scripts: - polar_map - cam_taylor_diagram - qbo + #- tem #To plot TEM, please un-comment #- regional_map_multicase #To use this please un-comment and fill-out #the "region_multicase" section below @@ -404,5 +401,25 @@ diag_var_list: # region_season: # region_variables: +# Options for TEM diagnostics (./averaging/create_TEM_files.py and ./plotting/temp.py) +#tem_info: + #Location where TEM files are stored: + #If path not specified or commented out, TEM calculation/plots will be skipped +# tem_loc: /glade/scratch/richling/adf-output/ADF-data/TEM/ + + #TEM history file number + #If missing or blank, ADF will default to h4 +# hist_num: h4 + + #Overwrite TEM files, if found? 
+ #If set to false, then TEM creation will be skipped if files are found: +# overwrite_tem_case: false + + #For multi-case + #overwrite_tem_case: + # - false + # - true + +# overwrite_tem_base: false #END OF FILE diff --git a/lib/adf_diag.py b/lib/adf_diag.py index e0bac2833..88d34f32f 100644 --- a/lib/adf_diag.py +++ b/lib/adf_diag.py @@ -17,6 +17,7 @@ import glob import subprocess import multiprocessing as mp +import copy import importlib @@ -181,6 +182,14 @@ def __init__(self, config_file, debug=False): #Add plotting script names: self.__plotting_scripts = self.read_config_var('plotting_scripts') + # Create property needed to return "plotting_scripts" variable to user: + @property + def plotting_scripts(self): + """Return a copy of the '__plotting_scripts' string list to user if requested.""" + #Note that a copy is needed in order to avoid having a script mistakenly + #modify this variable: + return copy.copy(self.__plotting_scripts) + ######### #Variable extraction functions ######### @@ -387,9 +396,6 @@ def call_ncrcat(cmd): start_year = start_years[case_idx] end_year = end_years[case_idx] - print(type(start_year)) - print(type(end_year)) - #Create path object for the CAM history file(s) location: starting_location = Path(cam_hist_locs[case_idx]) diff --git a/lib/adf_info.py b/lib/adf_info.py index d4c9cdeac..3771d7822 100644 --- a/lib/adf_info.py +++ b/lib/adf_info.py @@ -117,8 +117,6 @@ def __init__(self, config_file, debug=False): #Case names: case_names = self.get_cam_info('cam_case_name', required=True) - print(case_names) - #Grab test case nickname(s) test_nicknames = self.get_cam_info('case_nickname') if test_nicknames is None: @@ -206,6 +204,11 @@ def __init__(self, config_file, debug=False): #End if #End if + #Grab baseline nickname + base_nickname = self.get_baseline_info('case_nickname') + if base_nickname == None: + base_nickname = data_name + #Update baseline case name: data_name += f"_{syear_baseline}_{eyear_baseline}" #End if (compare_obs) @@ -444,7 +447,8 @@ def climo_yrs(self): return {"syears":syears,"eyears":eyears, "syear_baseline":self.__syear_baseline, "eyear_baseline":self.__eyear_baseline} - # Create property needed to return the climo start (syear) and end (eyear) years to user: + + # Create property needed to return the case nicknames to user: @property def case_nicknames(self): """Return the test case and baseline nicknames to the user if requested.""" diff --git a/lib/adf_obs.py b/lib/adf_obs.py index e38bb8f29..2f7be03b2 100644 --- a/lib/adf_obs.py +++ b/lib/adf_obs.py @@ -66,24 +66,21 @@ def __init__(self, config_file, debug=False): #Determine local directory: _adf_lib_dir = Path(__file__).parent - #Determine if variable defaults will be used: - self.__use_defaults = self.get_basic_info('use_defaults') - # Check whether user wants to use defaults: #----------------------------------------- - if self.__use_defaults: - #Determine whether to use adf defaults or custom: - _defaults_file = self.get_basic_info('defaults_file') - if _defaults_file is None: - _defaults_file = _adf_lib_dir/'adf_variable_defaults.yaml' - - #Open YAML file: - with open(_defaults_file, encoding='UTF-8') as dfil: - self.__variable_defaults = yaml.load(dfil, Loader=yaml.SafeLoader) + #Determine whether to use adf defaults or custom: + _defaults_file = self.get_basic_info('defaults_file') + if _defaults_file is None: + _defaults_file = _adf_lib_dir/'adf_variable_defaults.yaml' else: - #Set variable_defaults to empty dictionary: - self.__variable_defaults = {} + print(f"\n\t Not using 
ADF default variables yaml file, instead using {_defaults_file}\n") #End if + + #Open YAML file: + with open(_defaults_file, encoding='UTF-8') as dfil: + self.__variable_defaults = yaml.load(dfil, Loader=yaml.SafeLoader) + + _variable_defaults = self.__variable_defaults #----------------------------------------- #Check if land or ocean mask is requested, and if so then add OCNFRAC @@ -96,11 +93,11 @@ def __init__(self, config_file, debug=False): for var in self.diag_var_list: #Check if any variable wants a land or ocean mask: if var in self.__variable_defaults: - if 'mask' in self.__variable_defaults[var]: - #Variable needs a mask, so add "OCNFRAC" to - #the variable list: - self.add_diag_var('OCNFRAC') - break + if 'mask' in self.__variable_defaults[var]: + #Variable needs a mask, so add "OCNFRAC" to + #the variable list: + self.add_diag_var('OCNFRAC') + break #End if #End if #End for @@ -118,21 +115,6 @@ def __init__(self, config_file, debug=False): #Extract the "obs_data_loc" default observational data location: obs_data_loc = self.get_basic_info("obs_data_loc") - #Check that a variable defaults file exists (as it is currently needed to extract obs data): - if not self.__variable_defaults: - #Determine whether to use adf defaults or custom: - _defaults_file = self.get_basic_info('defaults_file') - if _defaults_file is None: - _defaults_file = _adf_lib_dir/'adf_variable_defaults.yaml' - - #Open YAML file (but don't assign to object): - with open(_defaults_file, encoding='UTF-8') as nfil: - _variable_defaults = yaml.load(nfil, Loader=yaml.SafeLoader) - else: - #Set local variable to stored variable defaults dictionary: - _variable_defaults = self.__variable_defaults - #End if - #Loop over variable list: for var in self.diag_var_list: @@ -229,12 +211,6 @@ def variable_defaults(self): #modify this variable, as it is mutable and thus passed by reference: return copy.copy(self.__variable_defaults) - # Create property needed to return "use_defaults" variable to user: - @property - def use_defaults(self): - """Return the '__use_defaults' logical to the user if requested.""" - return self.__use_defaults - # Create property needed to return "var_obs_dict" dictionary to user: @property def var_obs_dict(self): diff --git a/lib/adf_variable_defaults.yaml b/lib/adf_variable_defaults.yaml index e5ff3e53b..af554f4d6 100644 --- a/lib/adf_variable_defaults.yaml +++ b/lib/adf_variable_defaults.yaml @@ -942,5 +942,88 @@ OMEGAT: diff_colormap: "coolwarm" plot_log_pressure: True +#++++++++++++++ +# Category: TEM +#++++++++++++++ +uzm: + ylim: [1e3,1] + units: m s-1 + long_name: Zonal-Mean zonal wind + obs_file: "TEM_ERA5.nc" + obs_name: "ERA5" + obs_var_name: "uzm" + +vzm: + ylim: [1e3,1] + units: m s-1 + long_name: Zonal-Mean meridional wind + obs_file: "TEM_ERA5.nc" + obs_name: "ERA5" + obs_var_name: "vzm" + +epfy: + ylim: [1e2,1] + units: m3 s−2 + long_name: northward component of the Eliassen–Palm flux + obs_file: "TEM_ERA5.nc" + obs_name: "ERA5" + obs_var_name: "epfy" + +epfz: + ylim: [1e2,1] + units: m3 s−2 + long_name: upward component of the Eliassen–Palm flux + obs_file: "TEM_ERA5.nc" + obs_name: "ERA5" + obs_var_name: "epfz" + +vtem: + ylim: [1e2,1] + units: m/s + long_name: Transformed Eulerian mean northward wind + obs_file: "TEM_ERA5.nc" + obs_name: "ERA5" + obs_var_name: "vtem" + +wtem: + ylim: [1e2,1] + units: m/s + long_name: Transformed Eulerian mean upward wind + obs_file: "TEM_ERA5.nc" + obs_name: "ERA5" + obs_var_name: "wtem" + +psitem: + ylim: [1e2,1] + units: m3 s−2 + long_name: 
Transformed Eulerian mean mass stream function + obs_file: "TEM_ERA5.nc" + obs_name: "ERA5" + obs_var_name: "psitem" + +utendepfd: + ylim: [1e2,1] + units: m3 s−2 + long_name: tendency of eastward wind due to Eliassen-Palm flux divergence + obs_file: "TEM_ERA5.nc" + obs_name: "ERA5" + obs_var_name: "utendepfd" + +utendvtem: + ylim: [1e2,1] + units: m3 s−2 + long_name: tendency of eastward wind due to TEM northward wind advection and the coriolis term + obs_file: "TEM_ERA5.nc" + obs_name: "ERA5" + obs_var_name: "utendvtem" + +utendwtem: + ylim: [1e2,1] + units: m3 s−2 + long_name: tendency of eastward wind due to TEM upward wind advection + obs_file: "TEM_ERA5.nc" + obs_name: "ERA5" + obs_var_name: "utendwtem" + #----------- #End of File diff --git a/scripts/averaging/create_TEM_files.py b/scripts/averaging/create_TEM_files.py new file mode 100644 index 000000000..e09350feb --- /dev/null +++ b/scripts/averaging/create_TEM_files.py @@ -0,0 +1,462 @@ +import xarray as xr +import numpy as np +from scipy import integrate +from numpy import ma +from datetime import date +from pathlib import Path +from glob import glob +from itertools import chain + + +def create_TEM_files(adf): + """ + Calculate the TEM variables and create new netCDF files + + """ + + #Special ADF variables + #CAM simulation variables (these quantities are always lists): + case_names = adf.get_cam_info("cam_case_name", required=True) + + #Grab h4 history files locations + cam_hist_locs = adf.get_cam_info("cam_hist_loc", required=True) + + #Extract test case years + start_years = adf.climo_yrs["syears"] + end_years = adf.climo_yrs["eyears"] + + #Grab TEM diagnostics options + tem_opts = adf.read_config_var("tem_info") + + if not tem_opts: + print("\n No TEM options provided, skipping TEM file creation." \ + "\nSee documentation or config_cam_baseline_example.yaml for options to add to configuration file.") + return + + #Get test case(s) tem over-write boolean and force to list if not by default + overwrite_tem_cases = tem_opts.get("overwrite_tem_case") + + #If overwrite argument is missing, then default to False: + if overwrite_tem_cases is None: + overwrite_tem_cases = [False]*len(case_names) + + #If overwrite argument is a scalar, + #then convert it into a list: + if not isinstance(overwrite_tem_cases, list): overwrite_tem_cases = [overwrite_tem_cases] + + #Check if comparing to observations + if adf.get_basic_info("compare_obs"): + var_obs_dict = adf.var_obs_dict + #If dictionary is empty, then there are no observations, so quit here: + if not var_obs_dict: + print("No observations found to plot against, so no TEM will be generated.") + return + + base_name = "Obs" + else: + base_name = adf.get_baseline_info("cam_case_name", required=True) + cam_hist_locs.append(adf.get_baseline_info("cam_hist_loc", required=True)) + + #Extract baseline years (which may be empty strings if using Obs): + syear_baseline = adf.climo_yrs["syear_baseline"] + eyear_baseline = adf.climo_yrs["eyear_baseline"] + + case_names.append(base_name) + start_years.append(syear_baseline) + end_years.append(eyear_baseline) + overwrite_tem_cases.append(tem_opts.get("overwrite_tem_base", False)) + #End if + + #Set default to h4 + hist_num = tem_opts.get("hist_num") + if hist_num is None: + hist_num = "h4" + + #Extract TEM file save location + output_loc = tem_opts["tem_loc"] + + #If path not specified, skip TEM calculation? 
+ if output_loc is None: + print("\t 'tem_loc' not found in config file, so no TEM files will be generated.") + return + else: + #Notify user that script has started: + print("\n Generating CAM TEM diagnostics files...") + + output_loc = Path(output_loc) + #Check if re-gridded directory exists, and if not, then create it: + if not output_loc.is_dir(): + print(f" {output_loc} not found, making new directory") + output_loc.mkdir(parents=True) + #End if + + res = adf.variable_defaults # will be dict of variable-specific plot preferences + + if "qbo" in adf.plotting_scripts: + var_list = ['uzm','epfy','epfz','vtem','wtem', + 'psitem','utendepfd','utendvtem','utendwtem'] + else: + var_list = ['uzm','epfy','epfz','vtem','wtem','psitem','utendepfd'] + + #Check if comparing against observations + if adf.get_basic_info("compare_obs"): + print(f"\t Processing TEM for observations :") + + output_loc_idx = output_loc / base_name + #Check if re-gridded directory exists, and if not, then create it: + if not output_loc_idx.is_dir(): + print(f" {output_loc_idx} not found, making new directory") + output_loc_idx.mkdir(parents=True) + #End if + + #Set baseline file name as full path + tem_fil = output_loc_idx / f'{base_name}.TEMdiag.nc' + + #Get baseline case tem over-write boolean + overwrite_tem = tem_opts.get("overwrite_tem_base") + + #If files exist, then check if over-writing is allowed: + if (tem_fil.is_file()) and (not overwrite_tem): + print(f"\t INFO: Found TEM file and clobber is False, so moving to next case.") + pass + else: + if tem_fil.is_file(): + print(f"\t INFO: Found TEM file but clobber is True, so over-writing file.") + + #Group all TEM observation files together + tem_obs_fils = [] + for var in var_list: + if var in res: + #Gather from variable defaults file + obs_file_path = Path(res[var]["obs_file"]) + if not obs_file_path.is_file(): + obs_data_loc = adf.get_basic_info("obs_data_loc") + obs_file_path = Path(obs_data_loc)/obs_file_path + + #It's likely multiple TEM vars will come from one file, so check + #to see if it already exists from other vars. 
+ if obs_file_path not in tem_obs_fils: + tem_obs_fils.append(obs_file_path) + + ds = xr.open_mfdataset(tem_obs_fils,combine="nested") + start_year = str(ds.time[0].values)[0:4] + end_year = str(ds.time[-1].values)[0:4] + + #Update the attributes + ds.attrs['created'] = str(date.today()) + ds['lev']=ds['level'] + ds['zalat']=ds['lat'] + + #Make a copy of obs data so we don't do anything bad + ds_obs = ds.copy() + ds_base = xr.Dataset({'uzm': xr.Variable(('time', 'lev', 'zalat'), ds_obs.uzm.data), + 'epfy': xr.Variable(('time', 'lev', 'zalat'), ds_obs.epfy.data), + 'epfz': xr.Variable(('time', 'lev', 'zalat'), ds_obs.epfz.data), + 'vtem': xr.Variable(('time', 'lev', 'zalat'), ds_obs.vtem.data), + 'wtem': xr.Variable(('time', 'lev', 'zalat'), ds_obs.wtem.data), + 'psitem': xr.Variable(('time', 'lev', 'zalat'), ds_obs.psitem.data), + 'utendepfd': xr.Variable(('time', 'lev', 'zalat'), ds_obs.utendepfd.data), + 'utendvtem': xr.Variable(('time', 'lev', 'zalat'), ds_obs.utendvtem.data), + 'utendwtem': xr.Variable(('time', 'lev', 'zalat'), ds_obs.utendwtem.data), + 'lev': xr.Variable('lev', ds_obs.level.values), + 'zalat': xr.Variable('zalat', ds_obs.lat.values), + 'time': xr.Variable('time', ds_obs.time.values) + }) + + # write output to a netcdf file + ds_base.to_netcdf(tem_fil, unlimited_dims='time', mode='w') + + #End if (file creation or over-write file) + #End if baseline case + + #Loop over cases: + for case_idx, case_name in enumerate(case_names): + + print(f"\t Processing TEM for case '{case_name}' :") + + #Extract start and end year values: + start_year = start_years[case_idx] + end_year = end_years[case_idx] + + #Create path object for the CAM history file(s) location: + starting_location = Path(cam_hist_locs[case_idx]) + + #Check that path actually exists: + if not starting_location.is_dir(): + emsg = f"Provided 'cam_hist_loc' directory '{starting_location}' not found." + emsg += " Script is ending here." + return + #End if + + #Check if history files actually exist. If not then kill script: + hist_str = '*.cam.'+hist_num + if not list(starting_location.glob(hist_str+'.*.nc')): + emsg = f"No CAM history {hist_str} files found in '{starting_location}'." + emsg += " Script is ending here." 
+ adf.end_diag_fail(emsg) + #End if + + #Get full path and file for file name + output_loc_idx = output_loc / case_name + + #Check if re-gridded directory exists, and if not, then create it: + if not output_loc_idx.is_dir(): + print(f" {output_loc_idx} not found, making new directory") + output_loc_idx.mkdir(parents=True) + #End if + + #Set case file name + tem_fil = output_loc_idx / f'{case_name}.TEMdiag_{start_year}-{end_year}.nc' + + #Get current case tem over-write boolean + overwrite_tem = overwrite_tem_cases[case_idx] + + #If files exist, then check if over-writing is allowed: + if (tem_fil.is_file()) and (not overwrite_tem): + print(f"\t INFO: Found TEM file and clobber is False, so moving to next case.") + pass + else: + if tem_fil.is_file(): + print(f"\t INFO: Found TEM file but clobber is True, so over-writing file.") + + #Glob each set of years + #NOTE: This will make a nested list + hist_files = [] + for yr in np.arange(int(start_year),int(end_year)+1): + + #Grab all leading zeros for climo year just in case + yr = f"{str(yr).zfill(4)}" + hist_files.append(glob(f"{starting_location}/{hist_str}.{yr}*.nc")) + + #Flatten list of lists to 1d list + hist_files = sorted(list(chain.from_iterable(hist_files))) + + ds = xr.open_mfdataset(hist_files) + + #iterate over the times in a dataset + for idx,_ in enumerate(ds.time.values): + if idx == 0: + dstem0 = calc_tem(ds.squeeze().isel(time=idx)) + else: + dstem = calc_tem(ds.squeeze().isel(time=idx)) + dstem0 = xr.concat([dstem0, dstem],'time') + #End if + #End if + + #Update the attributes + dstem0.attrs = ds.attrs + dstem0.attrs['created'] = str(date.today()) + dstem0['lev']=ds['lev'] + + # write output to a netcdf file + dstem0.to_netcdf(tem_fil, unlimited_dims='time', mode='w') + + #End if (file creation or over-write file) + #Notify user that script has ended: + print(" ...TEM variables have been calculated successfully.") + + + + +def calc_tem(ds): + """ + # calc_tem() function to calculate TEM diagnostics on CAM/WACCM output + # This assumes the data have already been organized into zonal mean fluxes + # Uzm, THzm, VTHzm, Vzm, UVzm, UWzm, Wzm + # note that calculations are performed on model interface levels, which is ok + # in the stratosphere but not in the troposphere. If interested in tropospheric + # TEM diagnostics, make sure input fields have been interpolated to true pressure levels. + + # The code follows the 'TEM recipe' from Appendix A of Gerber, E. P. and Manzini, E.: + # The Dynamics and Variability Model Intercomparison Project (DynVarMIP) for CMIP6: + # assessing the stratosphere–troposphere system, Geosci. Model Dev., 9, 3413–3425, + # https://doi.org/10.5194/gmd-9-3413-2016, 2016 and the corrigendum. + + # pdf available here: https://gmd.copernicus.org/articles/9/3413/2016/gmd-9-3413-2016.pdf, + # https://gmd.copernicus.org/articles/9/3413/2016/gmd-9-3413-2016-corrigendum.pdf + + # Output from post-processing function + + # Table A1. Momentum budget variable list (2-D monthly / daily zonal means, YZT). 
+ + # Name Long name [unit] + + # epfy northward component of the Eliassen–Palm flux [m3 s−2] + # epfz upward component of the Eliassen–Palm flux [m3 s−2] + # vtem Transformed Eulerian mean northward wind [m s−1] + # wtem Transformed Eulerian mean upward wind [m s−1] + # psitem Transformed Eulerian mean mass stream function [kg s−1] + # utendepfd tendency of eastward wind due to Eliassen–Palm flux divergence [m s−2] + # utendvtem tendency of eastward wind due to TEM northward wind advection and the Coriolis term [m s−2] + # utendwtem tendency of eastward wind due to TEM upward wind advection [m s−2] + + # this utility based on python code developed by Isla Simpson 25 Feb 2021 + # initial coding of stand alone function by Dan Marsh 16 Dec 2022 + + # NOTE: function expects an xarray dataset with dataarrays of dimension (nlev,nlat) + # to process more than one timestep iterate over time. + """ + + # constants for TEM calculations + p0 = 101325. + a = 6.371e6 + om = 7.29212e-5 + H = 7000. + g0 = 9.80665 + + nlat = ds['zalat'].size + nlev = ds['lev'].size + + latrad = np.radians(ds.zalat) + coslat = np.cos(latrad) + coslat2d = np.tile(coslat,(nlev,1)) + + pre = ds['lev']*100. # pressure levels in Pascals + f = 2.*om*np.sin(latrad[:]) + f2d = np.tile(f,(nlev,1)) + + # change missing values to NaNs + uzm = ds['Uzm'] + uzm.values = ma.masked_greater_equal(uzm, 1e33) + vzm = ds['Vzm'] + vzm.values = ma.masked_greater_equal(vzm, 1e33) + wzm = ds['Wzm'] + wzm.values = ma.masked_greater_equal(wzm, 1e33) + thzm = ds['THzm'] + thzm.values = ma.masked_greater_equal(thzm, 1e33) + + uvzm = ds['UVzm'] + uvzm.values = ma.masked_greater_equal(uvzm, 1e33) + uwzm = ds['UWzm'] + uwzm.values = ma.masked_greater_equal(uwzm, 1e33) + vthzm = ds['VTHzm'] + vthzm.values = ma.masked_greater_equal(vthzm, 1e33) + + # convert w terms from m/s to Pa/s + wzm = -1.*wzm*pre/H + uwzm = -1.*uwzm*pre/H + + # compute the latitudinal gradient of U + dudphi = (1./(a*coslat2d))*np.gradient(uzm*coslat2d, + latrad, + axis=1) + + # compute the vertical gradient of theta and u + dthdp = np.gradient(thzm, + pre, + axis=0) + + dudp = np.gradient(uzm, + pre, + axis=0) + + # compute eddy streamfunction and its vertical gradient + psieddy = vthzm/dthdp + dpsidp = np.gradient(psieddy, + pre, + axis=0) + + # (1/acos(phii))**d(psi*cosphi/dphi) for getting w* + dpsidy = (1./(a*coslat2d)) \ + * np.gradient(psieddy*coslat2d, + latrad, + axis=1) + + # TEM vertical velocity (Eq A7 of dynvarmip) + wtem = wzm+dpsidy + + # utendwtem (Eq A10 of dynvarmip) + utendwtem = -1.*wtem*dudp + + # vtem (Eq A6 of dynvarmip) + vtem = vzm-dpsidp + + # utendvtem (Eq A9 of dynvarmip) + utendvtem = vtem*(f2d - dudphi) + + # calculate E-P fluxes + epfy = a*coslat2d*(dudp*psieddy - uvzm) # Eq A2 + epfz = a*coslat2d*((f2d-dudphi)*psieddy - uwzm) # Eq A3 + + # calculate E-P flux divergence and zonal wind tendency + # due to resolved waves (Eq A5) + depfydphi = (1./(a*coslat2d)) \ + * np.gradient(epfy*coslat2d, + latrad, + axis=1) + + depfzdp = np.gradient(epfz, + pre, + axis=0) + + utendepfd = (depfydphi + depfzdp)/(a*coslat2d) + utendepfd = xr.DataArray(utendepfd, coords = ds.Uzm.coords, name='utendepfd') + + # TEM stream function, Eq A8 + topvzm = np.zeros([1,nlat]) + vzmwithzero = np.concatenate((topvzm, vzm), axis=0) + prewithzero = np.concatenate((np.zeros([1]), pre)) + intv = integrate.cumtrapz(vzmwithzero,prewithzero,axis=0) + psitem = (2*np.pi*a*coslat2d/g0)*(intv - psieddy) + + # final scaling of E-P fluxes and divergence to transform to log-pressure + epfy = 
epfy*pre/p0 # A13 + epfz = -1.*(H/p0)*epfz # A14 + wtem = -1.*(H/pre)*wtem # A16 + + # + # add long name and unit attributes to TEM diagnostics + uzm.attrs['long_name'] = 'Zonal-Mean zonal wind' + uzm.attrs['units'] = 'm/s' + + vzm.attrs['long_name'] = 'Zonal-Mean meridional wind' + vzm.attrs['units'] = 'm/s' + + epfy.attrs['long_name'] = 'northward component of E-P flux' + epfy.attrs['units'] = 'm3/s2' + + epfz.attrs['long_name'] = 'upward component of E-P flux' + epfz.attrs['units'] = 'm3/s2' + + vtem.attrs['long_name'] = 'Transformed Eulerian mean northward wind' + vtem.attrs['units'] = 'm/s' + + wtem.attrs['long_name'] = 'Transformed Eulerian mean upward wind' + wtem.attrs['units'] = 'm/s' + + psitem.attrs['long_name'] = 'Transformed Eulerian mean mass stream function' + psitem.attrs['units'] = 'kg/s' + + utendepfd.attrs['long_name'] = 'tendency of eastward wind due to Eliassen-Palm flux divergence' + utendepfd.attrs['units'] = 'm/s2' + + utendvtem.attrs['long_name'] = 'tendency of eastward wind due to TEM northward wind advection and the coriolis term' + utendvtem.attrs['units'] = 'm/s2' + + utendwtem.attrs['long_name'] = 'tendency of eastward wind due to TEM upward wind advection' + utendwtem.attrs['units'] = 'm/s2' + + epfy.values = np.float32(epfy.values) + epfz.values = np.float32(epfz.values) + wtem.values = np.float32(wtem.values) + psitem.values = np.float32(psitem.values) + utendepfd.values = np.float32(utendepfd.values) + utendvtem.values = np.float32(utendvtem.values) + utendwtem.values = np.float32(utendwtem.values) + + dstem = xr.Dataset(data_vars=dict(date = ds.date, + datesec = ds.datesec, + time_bnds = ds.time_bnds, + uzm = uzm, + vzm = vzm, + epfy = epfy, + epfz = epfz, + vtem = vtem, + wtem = wtem, + psitem = psitem, + utendepfd = utendepfd, + utendvtem = utendvtem, + utendwtem = utendwtem + )) + + return dstem diff --git a/scripts/plotting/tem.py b/scripts/plotting/tem.py new file mode 100644 index 000000000..526721ce1 --- /dev/null +++ b/scripts/plotting/tem.py @@ -0,0 +1,474 @@ +#Import standard modules: +from pathlib import Path +import numpy as np +import xarray as xr +import warnings # use to warn user about missing files. +import matplotlib.pyplot as plt +import pandas as pd + +#Format warning messages: +def my_formatwarning(msg, *args, **kwargs): + # ignore everything except the message + return str(msg) + '\n' +warnings.formatwarning = my_formatwarning + +def tem(adf): + """ + Plot the contents of the TEM dignostic ouput of 2-D latitude vs vertical pressure maps. 
+ + Steps: + - loop through TEM variables + - calculate all-time fields (from individual months) + - Take difference, calculate statistics + - make plot + + """ + + #Special ADF variable which contains the output paths for + #all generated plots and tables for each case: + plot_location = Path(adf.plot_location[0]) + + #Check if plot output directory exists, and if not, then create it: + if not plot_location.is_dir(): + print(f" {plot_location} not found, making new directory") + plot_location.mkdir(parents=True) + + #CAM simulation variables (this is always assumed to be a list): + case_names = adf.get_cam_info("cam_case_name", required=True) + + res = adf.variable_defaults # will be dict of variable-specific plot preferences + + #Check if comparing against observations + if adf.compare_obs: + obs = True + base_name = "Obs" + else: + obs = False + base_name = adf.get_baseline_info("cam_case_name", required=True) + #End if + + #Extract test case years + syear_cases = adf.climo_yrs["syears"] + eyear_cases = adf.climo_yrs["eyears"] + + #Extract baseline years (which may be empty strings if using Obs): + syear_baseline = adf.climo_yrs["syear_baseline"] + eyear_baseline = adf.climo_yrs["eyear_baseline"] + + #Grab all case nickname(s) + test_nicknames = adf.case_nicknames["test_nicknames"] + base_nickname = adf.case_nicknames["base_nickname"] + case_nicknames = test_nicknames + [base_nickname] + + #Set plot file type: + # -- this should be set in basic_info_dict, but is not required + # -- So check for it, and default to png + basic_info_dict = adf.read_config_var("diag_basic_info") + plot_type = basic_info_dict.get('plot_type', 'png') + print(f"\t NOTE: Plot type is set to {plot_type}") + + # check if existing plots need to be redone + redo_plot = adf.get_basic_info('redo_plot') + print(f"\t NOTE: redo_plot is set to {redo_plot}") + #----------------------------------------- + + #Grab TEM diagnostics options + tem_opts = adf.read_config_var("tem_info") + + if not tem_opts: + print("\n No TEM options provided, skipping TEM plots." \ + "\nSee documentation or config_cam_baseline_example.yaml for options to add to configuration file.") + return + + #Location of saved TEM netCDF files + tem_loc = tem_opts.get("tem_loc") + + #If path not specified, skip TEM calculation + if tem_loc is None: + print("'tem_loc' not found in config file, so TEM plots will be skipped.") + return + else: + #Notify user that script has started: + print("\n Generating TEM plots...") + + #Set seasonal ranges: + seasons = {"ANN": np.arange(1,13,1), + "DJF": [12, 1, 2], + "JJA": [6, 7, 8], + "MAM": [3, 4, 5], + "SON": [9, 10, 11] + } + + #Suggestion from Rolando, if QBO is being produced, add utendvtem and utendwtem? + if "qbo" in adf.plotting_scripts: + var_list = ['uzm','epfy','epfz','vtem','wtem', + 'psitem','utendepfd','utendvtem','utendwtem'] + #Otherwise keep it simple + else: + var_list = ['uzm','epfy','epfz','vtem','wtem','psitem','utendepfd'] + + #Baseline TEM location + input_loc_idx = Path(tem_loc) / base_name + + #Check if comparing against obs + if obs: + #Set TEM file for observations + base_file_name = f'{base_name}.TEMdiag.nc' + else: + #Set TEM file for baseline + base_file_name = f'{base_name}.TEMdiag_{syear_baseline}-{eyear_baseline}.nc' + + #Set full path for baseline/obs file + tem_base = input_loc_idx / base_file_name + + #Check to see if baseline/obs TEM file exists + if tem_base.is_file(): + ds_base = xr.open_dataset(tem_base) + else: + print(f"\t'{base_file_name}' does not exist. 
TEM plots will be skipped.") + return + + #Setup TEM plots + nrows = len(var_list) + ncols = len(case_nicknames)+1 + fig_width = 20 + + #try and dynamically create size of fig based off number of cases + fig_height = 15+(ncols*nrows) + + #Loop over season dictionary: + for s in seasons: + #Location to save plots + plot_name = plot_location / f"{s}_TEM_Mean.png" + + fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=(fig_width,fig_height), + facecolor='w', edgecolor='k') + + #Loop over model cases: + for idx,case_name in enumerate(case_names): + + # Check redo_plot. If set to True: remove old plot, if it already exists: + if (not redo_plot) and plot_name.is_file(): + #Add already-existing plot to website (if enabled): + adf.add_website_data(plot_name, "TEM", case_name, season=s) + + #Continue to next iteration: + continue + elif (redo_plot) and plot_name.is_file(): + plot_name.unlink() + + #Extract start and end year values: + start_year = syear_cases[idx] + end_year = eyear_cases[idx] + + #Open the TEM file + output_loc_idx = Path(tem_loc) / case_name + case_file_name = f'{case_name}.TEMdiag_{start_year}-{end_year}.nc' + tem = output_loc_idx / case_file_name + + #Grab the data for the TEM netCDF files + if tem.is_file(): + ds = xr.open_dataset(tem) + else: + print(f"\t'{case_file_name}' does not exist. TEM plots will be skipped.") + return + + climo_yrs = {"test":[syear_cases[idx], eyear_cases[idx]], + "base":[syear_baseline, eyear_baseline]} + + #Setup and plot the sub-plots + tem_plot(ds, ds_base, case_nicknames, axs, s, var_list, res, obs, climo_yrs) + + #Set figure title + plt.suptitle(f'TEM Diagnostics: {s}', fontsize=20, y=.928) + + #Write the figure to provided workspace/file: + fig.savefig(plot_name, bbox_inches='tight', dpi=300) + + #Add plot to website (if enabled): + adf.add_website_data(plot_name, "TEM", case_name, season=s) + +# Helper functions +################## + +def tem_plot(ds, ds_base, case_names, axs, s, var_list, res, obs, climo_yrs): + """ + TEM subplots + + """ + #Set empty message for comparison of cases with different vertical levels + #TODO: Work towards getting the vertical and horizontal interpolations!! - JR + empty_message = "These have different vertical levels\nCan't compare cases currently" + props = {'boxstyle': 'round', 'facecolor': 'wheat', 'alpha': 0.9} + prop_x = 0.18 + prop_y = 0.42 + + for var in var_list: + #Grab variable defaults for this variable + vres = res[var] + + #Gather data for both cases + mdata = ds[var].squeeze() + odata = ds_base[var].squeeze() + + #Create array to avoid weighting missing values: + md_ones = xr.where(mdata.isnull(), 0.0, 1.0) + od_ones = xr.where(odata.isnull(), 0.0, 1.0) + + month_length = mdata.time.dt.days_in_month + weights = (month_length.groupby("time.season") / month_length.groupby("time.season").sum()) + + #Calculate monthly-weighted seasonal averages: + if s == 'ANN': + + #Calculate annual weights (i.e. 
don't group by season): + weights_ann = month_length / month_length.sum() + + mseasons = (mdata * weights_ann).sum(dim='time') + mseasons = mseasons / (md_ones*weights_ann).sum(dim='time') + + #Calculate monthly weights based on number of days: + if obs: + month_length_obs = odata.time.dt.days_in_month + weights_ann_obs = month_length_obs / month_length_obs.sum() + oseasons = (odata * weights_ann_obs).sum(dim='time') + oseasons = oseasons / (od_ones*weights_ann_obs).sum(dim='time') + else: + month_length_base = odata.time.dt.days_in_month + weights_ann_base = month_length_base / month_length_base.sum() + oseasons = (odata * weights_ann_base).sum(dim='time') + oseasons = oseasons / (od_ones*weights_ann_base).sum(dim='time') + + else: + #this is inefficient because we do same calc over and over + mseasons = (mdata * weights).groupby("time.season").sum(dim="time").sel(season=s) + wgt_denom = (md_ones*weights).groupby("time.season").sum(dim="time").sel(season=s) + mseasons = mseasons / wgt_denom + + if obs: + month_length_obs = odata.time.dt.days_in_month + weights_obs = (month_length_obs.groupby("time.season") / month_length_obs.groupby("time.season").sum()) + oseasons = (odata * weights_obs).groupby("time.season").sum(dim="time").sel(season=s) + wgt_denom = (od_ones*weights_obs).groupby("time.season").sum(dim="time").sel(season=s) + oseasons = oseasons / wgt_denom + else: + month_length_base = odata.time.dt.days_in_month + weights_base = (month_length_base.groupby("time.season") / month_length_base.groupby("time.season").sum()) + oseasons = (odata * weights_base).groupby("time.season").sum(dim="time").sel(season=s) + wgt_denom_base = (od_ones*weights_base).groupby("time.season").sum(dim="time").sel(season=s) + oseasons = oseasons / wgt_denom_base + + #difference: each entry should be (lat, lon) + dseasons = mseasons-oseasons + + #Run through variables and plot each against the baseline on each row + #Each column will be a case, ie (test, base, difference) + + # uzm + #------------------------------------------------------------------------------------------ + if var == "uzm": + mseasons.plot(ax=axs[0,0], y='lev', yscale='log',ylim=[1e3,1], + cbar_kwargs={'label': ds[var].units}) + + oseasons.plot(ax=axs[0,1], y='lev', yscale='log',ylim=[1e3,1], + cbar_kwargs={'label': ds[var].units}) + + #Check if difference plot has contour levels, if not print notification + if len(dseasons.lev) == 0: + axs[0,2].text(prop_x, prop_y, empty_message, transform=axs[0,2].transAxes, bbox=props) + else: + dseasons.plot(ax=axs[0,2], y='lev', yscale='log', ylim=[1e3,1],cmap="BrBG", + cbar_kwargs={'label': ds[var].units}) + + # epfy + #------------------------------------------------------------------------------------------ + if var == "epfy": + mseasons.plot(ax=axs[1,0], y='lev', yscale='log',vmax=1e6,ylim=[1e2,1], + cbar_kwargs={'label': ds[var].units}) + + oseasons.plot(ax=axs[1,1], y='lev', yscale='log',vmax=1e6,ylim=[1e2,1], + cbar_kwargs={'label': ds[var].units}) + + #Check if difference plot has contour levels, if not print notification + if len(dseasons.lev) == 0: + axs[1,2].text(prop_x, prop_y, empty_message, transform=axs[1,2].transAxes, bbox=props) + else: + dseasons.plot(ax=axs[1,2], y='lev', yscale='log', vmax=1e6, + ylim=[1e2,1],cmap="BrBG", + cbar_kwargs={'label': ds[var].units}) + + # epfz + #------------------------------------------------------------------------------------------ + if var == "epfz": + mseasons.plot(ax=axs[2,0], y='lev', yscale='log',vmax=1e5,ylim=[1e2,1], + cbar_kwargs={'label': 
ds[var].units}) + + oseasons.plot(ax=axs[2,1], y='lev', yscale='log',vmax=1e5,ylim=[1e2,1], + cbar_kwargs={'label': ds[var].units}) + + #Check if difference plot has contour levels, if not print notification + if len(dseasons.lev) == 0: + axs[2,2].text(prop_x, prop_y, empty_message, transform=axs[2,2].transAxes, bbox=props) + else: + dseasons.plot(ax=axs[2,2], y='lev', yscale='log', vmax=1e5, + ylim=[1e2,1],cmap="BrBG", + cbar_kwargs={'label': ds[var].units}) + + # vtem + #------------------------------------------------------------------------------------------ + if var == "vtem": + mseasons.plot.contourf(ax=axs[3,0], levels = 21, y='lev', yscale='log', + vmax=3,vmin=-3,ylim=[1e2,1], cmap='RdBu_r', + cbar_kwargs={'label': ds[var].units}) + mseasons.plot.contour(ax=axs[3,0], levels = 11, y='lev', yscale='log', + vmax=3,vmin=-3,ylim=[1e2,1], + colors='black', linestyles=None) + + oseasons.plot.contourf(ax=axs[3,1], levels = 21, y='lev', yscale='log', + vmax=3,vmin=-3,ylim=[1e2,1], cmap='RdBu_r', + cbar_kwargs={'label': ds[var].units}) + oseasons.plot.contour(ax=axs[3,1], levels = 11, y='lev', yscale='log', + vmax=3,vmin=-3,ylim=[1e2,1], + colors='black', linestyles=None) + + #Check if difference plot has contour levels, if not print notification + if len(dseasons.lev) == 0: + axs[3,2].text(prop_x, prop_y, empty_message, transform=axs[3,2].transAxes, bbox=props) + else: + dseasons.plot(ax=axs[3,2], y='lev', yscale='log', vmax=3,vmin=-3, + ylim=[1e2,1],cmap="BrBG", + cbar_kwargs={'label': ds[var].units}) + + # wtem + #------------------------------------------------------------------------------------------ + if var == "wtem": + mseasons.plot.contourf(ax=axs[4,0], levels = 21, y='lev', yscale='log', + vmax=0.005, vmin=-0.005, ylim=[1e2,1], cmap='RdBu_r', + cbar_kwargs={'label': ds[var].units}) + mseasons.plot.contour(ax=axs[4,0], levels = 7, y='lev', yscale='log', + vmax=0.03, vmin=-0.03, ylim=[1e2,1], + colors='black', linestyles=None) + + oseasons.plot.contourf(ax=axs[4,1], levels = 21, y='lev', yscale='log', + vmax=0.005, vmin=-0.005, ylim=[1e2,1], cmap='RdBu_r', + cbar_kwargs={'label': ds[var].units}) + oseasons.plot.contour(ax=axs[4,1], levels = 7, y='lev', yscale='log', + vmax=0.03, vmin=-0.03, ylim=[1e2,1], + colors='black', linestyles=None) + + #Check if difference plot has contour levels, if not print notification + if len(dseasons.lev) == 0: + axs[4,2].text(prop_x, prop_y, empty_message, transform=axs[4,2].transAxes, bbox=props) + else: + dseasons.plot(ax=axs[4,2], y='lev', yscale='log',vmax=0.005, vmin=-0.005, + ylim=[1e2,1],cmap="BrBG", + cbar_kwargs={'label': ds[var].units}) + + # psitem + #------------------------------------------------------------------------------------------ + if var == "psitem": + mseasons.plot.contourf(ax=axs[5,0], levels = 21, y='lev', yscale='log', + vmax=5e9, ylim=[1e2,2], + cbar_kwargs={'label': ds[var].units}) + + oseasons.plot.contourf(ax=axs[5,1], levels = 21, y='lev', yscale='log', + vmax=5e9, ylim=[1e2,2], + cbar_kwargs={'label': ds[var].units}) + + #Check if difference plot has contour levels, if not print notification + if len(dseasons.lev) == 0: + axs[5,2].text(prop_x, prop_y, empty_message, transform=axs[5,2].transAxes, bbox=props) + else: + dseasons.plot(ax=axs[5,2], y='lev', yscale='log',vmax=5e9, + ylim=[1e2,2],cmap="BrBG", + cbar_kwargs={'label': ds[var].units}) + + # utendepfd + #------------------------------------------------------------------------------------------ + if var == "utendepfd": + mseasons.plot(ax=axs[6,0], y='lev', 
yscale='log', + vmax=0.0001, vmin=-0.0001, ylim=[1e2,2], + cbar_kwargs={'label': ds[var].units}) + + oseasons.plot(ax=axs[6,1], y='lev', yscale='log', + vmax=0.0001, vmin=-0.0001, ylim=[1e2,2], + cbar_kwargs={'label': ds[var].units}) + + #Check if difference plot has contour levels, if not print notification + if len(dseasons.lev) == 0: + axs[6,2].text(prop_x, prop_y, empty_message, transform=axs[6,2].transAxes, bbox=props) + else: + dseasons.plot(ax=axs[6,2], y='lev', yscale='log',vmax=0.0001, vmin=-0.0001, + ylim=[1e2,2],cmap="BrBG", + cbar_kwargs={'label': ds[var].units}) + + # utendvtem + #------------------------------------------------------------------------------------------ + if var == "utendvtem": + mseasons.plot(ax=axs[7,0], y='lev', yscale='log',vmax=0.001, ylim=[1e3,1], + cbar_kwargs={'label': ds[var].units}) + + oseasons.plot(ax=axs[7,1], y='lev', yscale='log',vmax=0.001, ylim=[1e3,1], + cbar_kwargs={'label': ds[var].units}) + + #Check if difference plot has contour levels, if not print notification + if len(dseasons.lev) == 0: + axs[7,2].text(prop_x, prop_y, empty_message, transform=axs[7,2].transAxes, bbox=props) + else: + dseasons.plot(ax=axs[7,2], y='lev', yscale='log', vmax=0.001, ylim=[1e3,1],cmap="BrBG", + cbar_kwargs={'label': ds[var].units}) + + # utendwtem + #------------------------------------------------------------------------------------------ + if var == "utendwtem": + mseasons.plot(ax=axs[8,0], y='lev', yscale='log',vmax=0.0001, ylim=[1e3,1], + cbar_kwargs={'label': ds[var].units}) + + oseasons.plot(ax=axs[8,1], y='lev', yscale='log',vmax=0.0001, ylim=[1e3,1], + cbar_kwargs={'label': ds[var].units}) + + #Check if difference plot has contour levels, if not print notification + if len(dseasons.lev) == 0: + axs[8,2].text(prop_x, prop_y, empty_message, transform=axs[8,2].transAxes, bbox=props) + else: + dseasons.plot(ax=axs[8,2], y='lev', yscale='log', vmax=0.0001, ylim=[1e3,1],cmap="BrBG", + cbar_kwargs={'label': ds[var].units}) + + # Set the ticks and ticklabels for all x-axes + #NOTE: This has to come after all subplots have been done, + #I am assuming this is because of the way xarray plots info automatically for labels and titles + #This is to change the default xarray labels for each instance of the xarray plot method + plt.setp(axs, xticks=np.arange(-80,81,20), xlabel='latitude', title="") + + #Set titles of subplots + #Set case names in first subplot only + uzm = ds["uzm"].long_name.replace(" ", "\ ") + + test_yrs = f"{climo_yrs['test'][0]}-{climo_yrs['test'][1]}" + axs[0,0].set_title(f"\n\n"+"$\mathbf{Test}$"+f" yrs: {test_yrs}\n"+f"{case_names[0]}\n\n\n",fontsize=14) + + if obs: + obs_title = Path(vres["obs_name"]).stem + axs[0,1].set_title(f"\n\n"+"$\mathbf{Baseline}$\n"+f"{obs_title}\n\n"+"$\mathbf{"+uzm+"}$"+"\n",fontsize=14) + + else: + base_yrs = f"{climo_yrs['base'][0]}-{climo_yrs['base'][1]}" + axs[0,1].set_title(f"\n\n"+"$\mathbf{Baseline}$"+f" yrs: {base_yrs}\n"+f"{case_names[1]}\n\n"+"$\mathbf{"+uzm+"}$"+"\n",fontsize=14) + + #Set main title for difference plots column + axs[0,2].set_title("$\mathbf{Test} - \mathbf{Baseline}$"+"\n\n\n",fontsize=14) + + #Set variable name on center plot (except first plot, see above) + for i in range(1,len(var_list)): + var_name = ds[var_list[i]].long_name.replace(" ", "\ ") + axs[i,1].set_title("$\mathbf{"+var_name+"}$"+"\n",fontsize=14) + + #Adjust subplots + #May need to adjust hspace and wspace depending on if multi-case diagnostics ever happen for TEM diags + hspace = 0.4 + plt.subplots_adjust(wspace=0.5, 
hspace=hspace) + + return axs + +############## +#END OF SCRIPT \ No newline at end of file
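
A minimal, self-contained sketch (not ADF code) of the core step of the DynVarMIP "TEM recipe" that `calc_tem()` in `scripts/averaging/create_TEM_files.py` applies: build the eddy streamfunction from the zonal-mean eddy heat flux and derive the residual (TEM) circulation from it, following Eqs. A4, A6, and A7 of Gerber and Manzini (2016). The synthetic fields and array names below are illustrative only.

```python
import numpy as np

a = 6.371e6                                    # Earth radius [m], as in calc_tem()
nlev, nlat = 30, 46
lat = np.linspace(-88.0, 88.0, nlat)           # zonal-mean latitudes [deg]
latrad = np.radians(lat)
coslat2d = np.tile(np.cos(latrad), (nlev, 1))
pre = np.linspace(100.0, 100000.0, nlev)       # pressure levels [Pa]

# Synthetic zonal-mean fields on a (nlev, nlat) grid: meridional wind,
# eddy heat flux v'theta', and potential temperature
v_zm = np.random.randn(nlev, nlat)
vth_zm = np.random.randn(nlev, nlat)
theta_zm = 300.0 * (100000.0 / pre)[:, None] ** 0.286 * np.ones((nlev, nlat))

# Eddy streamfunction psi = v'theta' / (d theta / d p)                    (Eq. A4)
dth_dp = np.gradient(theta_zm, pre, axis=0)
psi_eddy = vth_zm / dth_dp

# Residual (TEM) meridional wind v* = v - d(psi)/dp                       (Eq. A6)
v_tem = v_zm - np.gradient(psi_eddy, pre, axis=0)

# Meridional term entering w*: (1/(a cos(phi))) d(psi cos(phi))/d(phi)    (Eq. A7)
dpsi_dy = (1.0 / (a * coslat2d)) * np.gradient(psi_eddy * coslat2d, latrad, axis=1)

print(v_tem.shape, dpsi_dy.shape)              # both (nlev, nlat)
```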
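For the seasonal means in `scripts/plotting/tem.py`, `tem_plot()` weights each month by its number of days and normalizes by the summed weights of the non-missing values. A minimal sketch of that weighting pattern on a toy dataset (the grid shape, dates, and variable names are illustrative, not ADF code):

```python
import numpy as np
import pandas as pd
import xarray as xr

# Two years of monthly data on a toy (time, lev, zalat) grid
time = pd.date_range("2000-01-01", periods=24, freq="MS")
data = xr.DataArray(np.random.randn(24, 4, 6),
                    dims=("time", "lev", "zalat"),
                    coords={"time": time})

# Days-in-month weights, normalized within each season
month_length = data.time.dt.days_in_month
weights = month_length.groupby("time.season") / month_length.groupby("time.season").sum()

# Mask so missing values contribute no weight to the normalization
ones = xr.where(data.isnull(), 0.0, 1.0)

djf = ((data * weights).groupby("time.season").sum("time")
       / (ones * weights).groupby("time.season").sum("time")).sel(season="DJF")
print(djf.dims, djf.shape)   # ('lev', 'zalat') (4, 6)
```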