From 8bd39f95051a7331a73712389b0471b973d06c61 Mon Sep 17 00:00:00 2001
From: Eliot Quon <eliot.quon@nrel.gov>
Date: Fri, 24 Mar 2023 14:23:11 -0600
Subject: [PATCH 1/4] Update python reqmt

---
 setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/setup.py b/setup.py
index 5d5f4ab..ada7f74 100644
--- a/setup.py
+++ b/setup.py
@@ -28,7 +28,7 @@
 URL = 'https://github.com/a2e-mmc/mmctools'
 EMAIL = 'eliot.quon@nrel.gov'
 AUTHOR = 'U.S. Department of Energy'
-REQUIRES_PYTHON = '>=3.10.6'
+REQUIRES_PYTHON = '>=3.10.5'
 VERSION = '0.1.1'
 
 # What packages are required for this module to be executed?

From cbd02cfb52da76e8c74b4bb1135f811e35f27e47 Mon Sep 17 00:00:00 2001
From: Eliot Quon <eliot.quon@nrel.gov>
Date: Fri, 24 Mar 2023 14:23:15 -0600
Subject: [PATCH 2/4] Squashed 'mmctools/windtools/' changes from
 c8533ba..5289cef

5289cef Merge pull request #39 from ewquon/f/update_ensight_reader
9e5e57b Add, update `sort` and `t` read_vector kwargs
ef4cb32 Add read_connectivity option to read connectivity info
4a7b1ae Merge pull request #32 from rthedin/feature/vtkreader
5b8445e Fix typo
d700ca2 Make `readVTK` function more generic
ca53f72 Merge pull request #31 from rthedin/feature/probeSetClassImprovement
14d6a73 Merge pull request #38 from ewquon/f/openfoam_includes
1456a96 Drop variable when squeezing VTK data if `squash!=None`
3a6defc Combine VTK functions into `windtools.io.vtk`
efddd1e Reader now works for for generic data varying in any dimension
804a557 Add `indexing` for consistent meshgrid
87f91b1 Add support for parsing #include
f18bbc1 Merge pull request #36 from ewquon/f/update_BinaryFile
44745d3 Add overloaded BinaryFile.write()
734b4ab Generalize encoding for reads
173d0b2 Style cleanup
bb95893 Merge pull request #35 from ewquon/hotfix/handle_binary_EOF
1d7c854 Handle EOF with BinaryFile.readline()
7dd0de7 Merge pull request #34 from ewquon/f/SOWFAtimeleft
899cbf9 Add license
afa4b2b Add usage info
042303d Add script to check time left in sowfa run
e8c8b89 Add functions to read VTKs
1e08562 Substantial change in reading logic. See extended commit message.
b7ff75e Catch missing/mispelled probe parameters

git-subtree-dir: mmctools/windtools
git-subtree-split: 5289cefe222a87c1fb40166d0c5eb1ae57c690ca
---
 bin/sowfa_time_left.py                       | 159 ++++++++
 windtools/SOWFA6/postProcessing/probeSets.py | 145 ++++++--
 windtools/io/binary.py                       |  87 +++--
 windtools/io/ensight.py                      |  82 +++-
 windtools/io/vtk.py                          | 371 ++++++++++++++++++-
 windtools/openfoam.py                        |  33 +-
 6 files changed, 797 insertions(+), 80 deletions(-)
 create mode 100755 bin/sowfa_time_left.py

diff --git a/bin/sowfa_time_left.py b/bin/sowfa_time_left.py
new file mode 100755
index 0000000..c3f0887
--- /dev/null
+++ b/bin/sowfa_time_left.py
@@ -0,0 +1,159 @@
+# Copyright 2021 NREL
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software distributed
+# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+# CONDITIONS OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+
+#
+# Usage
+# -----
+#   sowfa_time_left.py                      # to scrape timing from latest log.*.${application}
+#   sowfa_time_left.py /path/to/solver.log  # to scrape timing from specific file
+#
+# Options
+# -------
+#   -plot : plot timing history
+#   -write : write timing history
+#
+
+import sys
+import os
+import glob
+import time
+import numpy as np
+
+log_est_total_time = 'estimated_total_time'
+
+dpath = '.'
+
+makeplots = False
+if '-plot' in sys.argv:
+    makeplots = True
+    import matplotlib.pyplot as plt
+    sys.argv.remove('-plot')
+
+writehist = False
+if '-write' in sys.argv:
+    writehist = True
+    sys.argv.remove('-write')
+
+if len(sys.argv) > 1:
+    fpath = sys.argv[1]
+    dpath = os.path.split(fpath)[0]
+
+with open(os.path.join(dpath,'system','controlDict'),'r') as f:
+    for line in f:
+        if line.strip().startswith('application'):
+            app = line.split()[1].split(';')[0]
+        elif line.strip().startswith('deltaT'):
+            dt = float(line.split()[1][:-1])
+        elif line.strip().startswith('endTime'):
+            endTime = float(line.split()[1][:-1])
+
+if len(sys.argv) == 1:
+    import glob
+    logfiles = glob.glob('log.*'+app)
+    logfiles.sort()
+    fpath = logfiles[-1]
+
+nsteps = 0
+startTime = -1
+simTimes = []
+clockTimes = []
+CourantMax = []
+try:
+    with open(fpath,'r') as f:
+        for line in f:
+            if line.startswith('Create mesh'):
+                startTime = float(line.split()[-1])
+                if startTime > 0:
+                    print('Detected restart from t =',startTime)
+            elif line.startswith('Time ='):
+                curTime = float(line.split()[2])
+                simTimes.append(curTime)
+                nsteps += 1
+            elif line.startswith('ExecutionTime ='):
+                elapsedTime = float(line.split()[6])
+                clockTimes.append(elapsedTime)
+            elif line.startswith('Courant Number'):
+                cflmax = float(line.split()[-1])
+                CourantMax.append(cflmax)
+except NameError:
+    sys.exit('USAGE: '+sys.argv[0]+' log_file')
+except IOError:
+    sys.exit('Problem reading '+fpath)
+
+completed = (curTime-startTime) / (endTime-startTime)
+
+def disptime(t,
+             threshold=100.0,
+             timeunits=['s','min','hrs','days'],
+             timeconvert=[60.,60.,24.]):
+    i = 0
+    t *= 1.0 # convert to float
+    while t > threshold and i < len(timeunits)-1:
+        t /= timeconvert[i]
+        i += 1
+    return '%.1f %s' % (t,timeunits[i]) # old format, for compatibility
+
+print('Simulation is currently at t = {:.1f} and will end at {:.1f} ({:.1f}% complete)'.format(
+    curTime, endTime, 100*completed))
+print(app,'has been running for',nsteps,'steps')
+print('Elapsed time:',disptime(elapsedTime))
+
+timestep_size = np.diff(np.array(simTimes))
+ctime_per_step = np.diff(np.array(clockTimes))
+print('Average/min/max time per step', \
+      np.mean(ctime_per_step), np.min(ctime_per_step), np.max(ctime_per_step))
+
+totalTime = elapsedTime / completed
+print('ESTIMATED TOTAL TIME:',disptime(totalTime))
+if log_est_total_time:
+    try:
+        with open(os.path.join(dpath,log_est_total_time),'a') as f:
+            f.write(time.strftime('%x %X\tsimulated t={:.1f}s, N={:d}\tESTIMATED TOTAL TIME: {:s}\n'.format(curTime,nsteps,disptime(totalTime))))
+    except IOError:
+        pass
+
+remainingTime = totalTime - elapsedTime
+print('Remaining time:',disptime(remainingTime))
+
+timeStepsLeft = (endTime-curTime) / timestep_size[-1]
+remainingTimeOpt = timeStepsLeft * ctime_per_step[-1]
+print('Remaining time (based on clocktime for last timestep):',disptime(remainingTimeOpt))
+print('  ',timeStepsLeft,'time steps left')
+print('  ~',ctime_per_step[-1],'clock time per step')
+
+print('Average/min/max maximum Courant number per step', \
+      np.mean(CourantMax), np.min(CourantMax), np.max(CourantMax))
+
+print(time.strftime('Current date/time is %x %X'))
+
+if writehist:
+    N = min(len(simTimes),len(ctime_per_step))
+    with open(fpath+'.timing','w') as f:
+        f.write(time.strftime('# Current system time: %x %X\n'))
+        f.write('# step simulation_time wallclock_time_per_step\n')
+        for i in range(N):
+            f.write('{:d} {:f} {:f}\n'.format(i+1,simTimes[i],ctime_per_step[i]))
+
+if makeplots:
+    plt.figure()
+    plt.plot(ctime_per_step)
+    plt.xlabel('iteration')
+    plt.ylabel('Clock time / step [s]')
+
+    if (np.max(timestep_size)-np.min(timestep_size)) > 1e-6:
+        plt.figure()
+        plt.plot(timestep_size)
+        plt.xlabel('iteration')
+        plt.ylabel('simulated timestep [s]')
+    else:
+        print('Skipping timestep plot for constant dt=',np.mean(timestep_size))
+
+    plt.show()

diff --git a/windtools/SOWFA6/postProcessing/probeSets.py b/windtools/SOWFA6/postProcessing/probeSets.py
index a56818c..29f7aea 100644
--- a/windtools/SOWFA6/postProcessing/probeSets.py
+++ b/windtools/SOWFA6/postProcessing/probeSets.py
@@ -16,7 +16,7 @@
 """
 from __future__ import print_function
-import os
+import os, glob
 import pandas as pd
 import numpy as np
 from .reader import Reader
@@ -106,7 +106,8 @@ def __init__(self, dpath=None, tstart=None, tend=None, varList='all', posPert=0.
         self.tstart = tstart
         self.tend = tend
         self.varList = varList
-        self._allVars = {'U','UMean','T','TMean','TPrimeUPrimeMean','UPrime2Mean','p_rgh'}
+        self._allVars = {'U','T','p_rgh','UMean','TMean','UPrime2Mean','TPrimeUPrimeMean'}
+        self._printzagl = False
         super().__init__(dpath,includeDt=True,**kwargs)
@@ -147,7 +148,10 @@ def _processdirs(self, tdirList, trimOverlap=False, **kwargs):
             for prefix in fprefix:
                 for param in fparam:
                     for suffix in fsuffix:
-                        fileList.append( prefix + param + '_' + var + suffix )
+                        try:
+                            fileList.append( prefix + param + '_' + var + suffix )
+                        except TypeError:
+                            raise ValueError('Specify fprefix, fparam, varList, and fsuffix (check spelling)')
         outputs = fileList
 
         # Get list of times and trim the data
@@ -165,26 +169,32 @@ def _processdirs(self, tdirList, trimOverlap=False, **kwargs):
         if not tdirList:
             raise ValueError('No time directories found')
 
-        # Process all data
+        # Process all data. Loop over files, not variables. The files, however, contain a single variable.
        for field in outputs:
-            arrays = [ self._read_data( tdir,field ) for tdir in tdirList ]
+            # parse the name to create the right variable (var is always a list)
+            param, var = self._parseProbeName(field)
+            # Read the file that contains the variable, getting only the desired var
+            arrays = [ self._read_data( tdir, field, param, var ) for tdir in tdirList ]
             # combine into a single array and trim end of time series
             arrays = np.concatenate(arrays)[:self.imax,:]
-            # parse the name to create the right variable
-            param, var = self._parseProbeName(field)
-            # add the zagl to the array
-            arrays = np.hstack((arrays[:,:4], \
-                                np.full((arrays.shape[0],1),param), \
-                                arrays[:,4:]))
-            # append to (or create) a variable attribute
+
+            # if `param` is an integer, consider that zagl and add to the array. Otherwise, skip it
+            if isinstance(param, int):
+                print('Param is integer. Assuming it means zagl and adding it')
+                # add the zagl to the array
+                arrays = np.hstack((arrays[:,:4], \
+                                    np.full((arrays.shape[0],1),param), \
+                                    arrays[:,4:]))
+                self._printzagl = True
+
+            try:
+                setattr(self,var,np.concatenate((getattr(self,var),arrays)))
+            except AttributeError:
+                setattr( self, var, arrays )
-
-            if not var in self._processed:
+
+            if not var in self._processed:
                 self._processed.append(var)
-            print('  read',field)
+            print(f'  read {self.fprefix}{param}_*{self.fsuffix}, variable {var}')
 
         self.t = np.unique(arrays[:,0])
         self.Nt = len(self.t)
@@ -198,24 +208,82 @@
 
     def _parseProbeName(self, field):
-        # Example: get 'vmasts_50mGrid_h30_T.xy' and return param=30, var='T'
-        # Remove the prefix from the full field name
-        f = field.replace(self.fprefix,'')
-        # Substitude the first underscore with a dot and split array
-        f = f.replace('_','.',1).split('.')
-        for i in set(f).intersection(self._allVars):
-            var = i
-        param = int(f[-3])
+        # This function will only receive `field` with one variable, even if
+        # that file does not exist.
+        # Examples: gets 'vmasts_50mGrid_h30_T.xy' and returns param=30, var='T'
+        #           gets 'vmasts_m1_1_U.xy' and returns param='m1_1', var='U'
+        #           it will never get 'vmasts_m1_2_T_p_rgh'
+
+        # Remove the prefix and suffix from the full field name
+        f = field.replace(self.fprefix,'').replace(self.fsuffix,'')
+
+        # Find the variable(s) that are in the field name
+        for v in self._allVars:
+            if v in f:
+                var = v
+                # remove the variable from the field, along with the preceding underscore
+                f = f.replace('_'+v, '')
+                break
+
+        # what is left is the param
+        if f.isdigit(): param = int(f)
+        else:           param = f
+
        return param, var
+
+    def _getFileContainingVar(self, dpath, param, var):
+        # Example: gets 'vmast_50_T.xy' and returns 'vmast_50_T_p_rgh.xy' and pos=1
+        #          gets 'vmast_50_T.xy' and returns 'vmast_50_T.xy' and pos=1
+        #          gets 'vmast_50_T.xy' and returns 'vmast_50_p_rgh_T.xy' and pos=2
+
+        # Get filename that has `param` and `var` in it
+        fname = os.path.basename(glob.glob(os.path.join(dpath,'*'+param+'_*'+var+'*'))[0])
+
+        # Get all the variables present in the current file. E.g.: ['T'], ['T','prgh']
+        varsInFname = fname.replace(self.fprefix,'').replace(param+'_','').replace(self.fsuffix,'') \
+                           .replace('p_rgh','prgh').split('_')
+
+        # Find position of the variable. First get rid of _ in p_rgh
+        pos = varsInFname.index(var.replace('_',''))
+
+        # Length of the variable
+        if var in ['T','TMean','p_rgh']:
+            lenvar = 1
+        elif var in ['U','UMean']:
+            lenvar = 3
+        elif var in ['TPrimeUPrimeMean','UPrime2Mean']:
+            lenvar = 6
+        else:
+            raise NotImplementedError('Unknown variable name. Consider expanding the class.')
+
+        pos = pos*lenvar+1
 
-    def _read_data(self, dpath, fname):
-        fpath = dpath + os.sep + fname
+        return fname, pos, lenvar
+
+
+    def _read_data(self, dpath, fname, param, var):
+
+        fpath = dpath + os.sep + fname
+        pos = 0
+
+        # The sampling set groups scalars, vectors, and tensors in the same file. Due to this,
+        # files with names such as 'vmasts_h30_T_p_rgh.xy' will exist. If the desired variable
+        # is part of a file with multiple variables, then the `fname` filename passed to this
+        # function will not exist. Thus, we get the actual filename that contains the desired
+        # variable in its name and what position (column) that variable is in.
+        if not os.path.isfile(fpath):
+            actualfname, pos, lenvar = self._getFileContainingVar(dpath, param, var)
+            fpath = dpath + os.sep + actualfname
+
         currentTime = float(os.path.basename(dpath))
         with open(fpath) as f:
             try:
                 # read the actual data from probes
                 array = self._read_probe_posAndData(f)
+                # get the correct location of the var if needed
+                if pos != 0:
+                    array = np.c_[array[:,0:3], array[:,2+pos:2+pos+lenvar]]
                 # add current time step info to first column
                 array = np.c_[np.full(array.shape[0],currentTime), array]
             except IOError:
@@ -279,6 +347,7 @@ def to_pandas(self,itime=None,fields=None,dtype=None):
 
         # create dataframes for each field
         print('Creating dataframe ...')
+        printzagl = self._printzagl
         data = {}
         for var in fields:
             print('processing', var)
@@ -287,22 +356,30 @@
             data['time'] = F[:,0]
             data['x'] = F[:,1]
             data['y'] = F[:,2]
-            data['zabs'] = F[:,3]
-            data['zagl'] = F[:,4]
-            if F.shape[1]==6:
+            if printzagl:
+                data['zabs'] = F[:,3]
+                data['zagl'] = F[:,4]
+            else:
+                data['z'] = F[:,3]
+            if F.shape[1]==5+printzagl:
                 # scalar
-                data[var] = F[:,5:].flatten()
-            elif F.shape[1]==8:
+                data[var] = F[:,4+printzagl:].flatten()
+            elif F.shape[1]==7+printzagl:
                 # vector
                 for j,name in enumerate(['x','y','z']):
-                    data[var+name] = F[:,5+j].flatten()
-            elif F.shape[1]==11:
+                    data[var+name] = F[:,4+printzagl+j].flatten()
+            elif F.shape[1]==10+printzagl:
                 # symmetric tensor
                 for j,name in enumerate(['xx','xy','xz','yy','yz','zz']):
-                    data[var+name] = F[:,5+j].flatten()
+                    data[var+name] = F[:,4+printzagl+j].flatten()
 
         df = pd.DataFrame(data=data,dtype=dtype)
-        return df.sort_values(['time','x','y','zabs','zagl']).set_index(['time','x','y','zagl'])
+        if printzagl:
+            df = df.sort_values(['time','x','y','zabs','zagl']).set_index(['time','x','y','zagl'])
+        else:
+            df = df.sort_values(['time','x','y','z']).set_index(['time','x','y','z'])
+
+        return df
 
     def to_netcdf(self,fname,fieldDescriptions={},fieldUnits={}):
         raise NotImplementedError('Not available for ProbeSet class.')

diff --git a/windtools/io/binary.py b/windtools/io/binary.py
index c001295..6d48880 100644
--- a/windtools/io/binary.py
+++ b/windtools/io/binary.py
@@ -13,7 +13,7 @@
 
 import struct
 
-class BinaryFile:
+class BinaryFile(object):
     """
     Helper class for handling binary file I/O
     """
@@ -62,44 +62,64 @@ def unpack(self,*args):
         except struct.error:
             raise IOError
 
-    def read_char(self):
-        return self.f.read(1).decode('utf-8')
+    def read_char(self,encoding='utf-8'):
+        return self.f.read(1).decode(encoding)
 
-    def readline(self):
+    def readline(self,encoding='utf-8'):
         s = ''
-        b = self.read_char()
-        while not b == '\n':
+        b = self.read_char(encoding)
+        while not b in ['\n','']:
             s += b
-            b = self.f.read(1).decode('utf-8')
+            b = self.read_char(encoding)
         return s + b
 
-    # integers
+    # integer reads
     def read_int1(self,N=1):
-        if N==1: return self.unpack('b',self.f.read(1))[0] #short
-        else: return self.unpack('{:d}b',self.f.read(N*1))[0:N] #short
+        if N==1:
+            return self.unpack('b',self.f.read(1))[0] #short
+        else:
+            return self.unpack('{:d}b',self.f.read(N*1))[0:N] #short
     def read_int2(self,N=1):
-        if N==1: return self.unpack('h',self.f.read(2))[0] #short
-        else: return self.unpack('{:d}h'.format(N),self.f.read(N*2))[0:N] #short
+        if N==1:
+            return self.unpack('h',self.f.read(2))[0] #short
+        else:
+            return self.unpack('{:d}h'.format(N),self.f.read(N*2))[0:N] #short
     def read_int4(self,N=1):
-        if N==1: return self.unpack('i',self.f.read(4))[0] #int
-        else: return self.unpack('{:d}i'.format(N),self.f.read(N*4))[0:N] #int
+        if N==1:
+            return self.unpack('i',self.f.read(4))[0] #int
+        else:
+            return self.unpack('{:d}i'.format(N),self.f.read(N*4))[0:N] #int
    def read_int8(self,N=1):
-        if N==1: return self.unpack('l',self.f.read(8))[0] #long
-        else: return self.unpack('{:d}l'.format(N),self.f.read(N*8))[0:N] #long
+        if N==1:
+            return self.unpack('l',self.f.read(8))[0] #long
+        else:
+            return self.unpack('{:d}l'.format(N),self.f.read(N*8))[0:N] #long
+    def read_int(self,N=1):
+        return self.read_int4(N)
 
-    # floats
+    # float reads
     def read_float(self,N=1,dtype=float):
-        if N==1: return dtype( self.unpack('f',self.f.read(4))[0] )
-        else: return [ dtype(val) for val in self.unpack('{:d}f'.format(N),self.f.read(N*4))[0:N] ]
+        if N==1:
+            return dtype( self.unpack('f',self.f.read(4))[0] )
+        else:
+            return [ dtype(val) for val in self.unpack('{:d}f'.format(N),self.f.read(N*4))[0:N] ]
     def read_double(self,N=1):
-        if N==1: return self.unpack('d',self.f.read(8))[0]
-        else: return self.unpack('{:d}d'.format(N),self.f.read(N*8))[0:N]
+        if N==1:
+            return self.unpack('d',self.f.read(8))[0]
+        else:
+            return self.unpack('{:d}d'.format(N),self.f.read(N*8))[0:N]
 
     def read_real4(self,N=1): return self.read_float(N,dtype=np.float32)
     def read_real8(self,N=1): return self.read_float(N,dtype=np.float64)
 
     # binary output
+    def write(self,val,encoding='utf-8'):
+        if isinstance(val,str):
+            self.f.write(val.encode(encoding))
+        else:
+            self.f.write(val)
+
     def write_type(self,val,type):
         if hasattr(val,'__iter__'):
             N = len(val)
@@ -107,16 +127,19 @@ def write_type(self,val,type):
         else:
             self.f.write(struct.pack(type,val))
 
-    # aliases
-    def read_int(self,N=1): return self.read_int4(N)
-
-    def write_int1(self,val): self.write_type(val,'b')
-    def write_int2(self,val): self.write_type(val,'h')
-    def write_int4(self,val): self.write_type(val,'i')
-    def write_int8(self,val): self.write_type(val,'l')
-
-    def write_int(self,val): self.write_int4(val)
-    def write_float(self,val): self.write_type(val,'f')
-    def write_double(self,val): self.write_type(val,'d')
+    def write_int(self,val):
+        self.write_int4(val)
+    def write_int1(self,val):
+        self.write_type(val,'b')
+    def write_int2(self,val):
+        self.write_type(val,'h')
+    def write_int4(self,val):
+        self.write_type(val,'i')
+    def write_int8(self,val):
+        self.write_type(val,'l')
+    def write_float(self,val):
+        self.write_type(val,'f')
+    def write_double(self,val):
+        self.write_type(val,'d')

diff --git a/windtools/io/ensight.py b/windtools/io/ensight.py
index f5faa0d..b0b2183 100644
--- a/windtools/io/ensight.py
+++ b/windtools/io/ensight.py
@@ -13,24 +13,84 @@
 import pandas as pd
 
-def read_mesh(fpath,headerlength=8,chunksize=None):
-    """Read Ensight mesh file (ascii) into a dataframe"""
+def read_mesh(fpath,headerlength=8,chunksize=None,read_connectivity=False,verbose=False):
+    """Read Ensight mesh file (ascii) into a dataframe
+
+    Parameters
+    ----------
+    headerlength: integer
+        Number of header lines to skip
+    chunksize: integer or None
+        Chunksize parameter for pd.read_csv, can speed up I/O
+    read_connectivity: bool
+        If True, read in connectivity information and convert points
+        from nodes to cell centers
+    """
     with open(fpath,'r') as f:
+        # header
         for _ in range(headerlength):
-            f.readline()
+            line = f.readline()
+            if verbose:
+                print(line,end='')
         N = int(f.readline())
+
+        # read points
         if chunksize is None:
             mesh = pd.read_csv(f,header=None,nrows=3*N).values
         else:
            mesh = pd.concat(pd.read_csv(f,header=None,nrows=3*N,chunksize=chunksize)).values
-    df = pd.DataFrame(data=mesh.reshape((N,3),order='F'), columns=['x','y','z'])
-    return df
+    if verbose:
+        print(f'read {N} points')
+    mesh = pd.DataFrame(data=mesh.reshape((N,3),order='F'), columns=['x','y','z'])
+    if read_connectivity:
+        # pd.read_csv may read data in chunks, so reading the next line is not
+        # guaranteed to give what we want... reread
+        with open(fpath,'r') as f:
+            for _ in range(headerlength + 1 + 3*N):
+                f.readline()
+            element_type = f.readline().strip()
+            if not element_type == 'quad4':
+                print(f'WARNING: element type "{element_type}" not tested')
+            Ncell = int(f.readline())
+            if chunksize is None:
+                conn = pd.read_csv(f,header=None,nrows=Ncell,
+                                   delim_whitespace=True)
+            else:
+                conn = pd.concat(pd.read_csv(f,header=None,nrows=Ncell,
+                                             delim_whitespace=True,
+                                             chunksize=chunksize))
+        if verbose:
+            print(f'read connectivity data for {Ncell} cells')
+        # switch to 0-indexing
+        assert (conn.values.max() == N)
+        conn -= 1
+        assert (conn.values.min() == 0)
+        # calculate cell centers
+        newindices = pd.RangeIndex(Ncell)
+        nodes = [mesh.loc[indices].set_index(newindices) for col,indices in conn.iteritems()]
+        cellcenters = sum(nodes) / len(nodes)
+        mesh = cellcenters
 
-def read_vector(fpath,mesh,t=0.0,headerlength=4,chunksize=None):
+    return mesh
+
+
+def read_vector(fpath,mesh,t=None,sort=False,headerlength=4,chunksize=None):
     """Read Ensight data array (ascii) into a dataframe with combined
     mesh information corresponding to the specified time; mesh should
     be read in by the read_mesh() function
+
+    Parameters
+    ----------
+    t: float
+        Simulation time to associate with this file [s], useful if
+        reading many files with the intention of using pd.concat
+    sort: bool
+        Sort by x,y,z
+    headerlength: integer
+        Number of header lines to skip
+    chunksize: integer or None
+        Chunksize parameter for pd.read_csv, can speed up I/O
     """
     Npts = len(mesh)
     with open(fpath,'r') as f:
@@ -41,7 +101,13 @@
         else:
             vals = pd.concat(pd.read_csv(f,header=None,nrows=3*Npts,chunksize=chunksize)).values
     df = mesh.copy()
-    df['t'] = t
     uvw = pd.DataFrame(data=vals.reshape((Npts,3),order='F'), columns=['u','v','w'])
     df = pd.concat([df,uvw], axis=1)
-    return df.set_index(['t','x','y','z'])
+    if t is not None:
+        df['t'] = t
+        df = df.set_index(['t','x','y','z'])
+    else:
+        df = df.set_index(['x','y','z'])
+    if sort:
+        df = df.sort_index()
+    return df

diff --git a/windtools/io/vtk.py b/windtools/io/vtk.py
index eaefc77..4dfc508 100644
--- a/windtools/io/vtk.py
+++ b/windtools/io/vtk.py
@@ -1,4 +1,4 @@
-# Copyright 2019 NREL
+# Copyright 2021 NREL
 
 # Licensed under the Apache License, Version 2.0 (the "License"); you may not use
 # this file except in compliance with the License. You may obtain a copy of the
@@ -10,9 +10,376 @@
 # specific language governing permissions and limitations under the License.
 
 """
-Functions to deal with simple VTK I/O.
+Functions for handling VTKs
 """
+
 import struct
+import vtk
+from vtk.numpy_interface import dataset_adapter as dsa
+from scipy.interpolate import griddata
+import xarray as xr
+import os
+import datetime
+import pandas as pd
+import numpy as np
+
+def readVTK(vtkpath, sliceType=None, dateref=None, ti=None, tf=None, t=None, res=None, squash=None):
+    """
+    Read VTK file(s) and interpolate into a uniform grid based on the limits read from the VTK
+    and resolution given.
+
+    Function tailored to read multiple files with the following file structure:
+        full/path/to/
+        ├── <time 1>
+        ├── <time 2>
+        │   ├── <slice type 1>
+        │   ├── <slice type 2>
+        │   ├── ...
+        │   └── <slice type N>
+        ├── ...
+        └── <time N>
+
+    To read multiple VTKs, specify sliceType, ti, and tf;
+    To read a single VTK, there are two options: (i) specify full vtk path vtkpath, or
+    (ii) specify vtkpath, sliceType, and t.
+
+    If reading more than a single VTK, it is assumed that they share the same coordinates.
+
+    The function provides output for full 3-D VTK. If reading planar data, use the squash
+    option.
+
+    Example calls:
+    # Read multiple
+    ds = readVTK('full/path/to/vtks', sliceType='U_zNormal.80.vtk', ti= 133200, tf=133205,
+                 dateref= pd.to_datetime('2010-05-14 12:00:00'), res=10)
+    # Read single
+    ds = readVTK('full/path/to/vtks/133200/U_zNormal.80.vtk',
+                 dateref= pd.to_datetime('2010-05-14 12:00:00'), res=10)
+    # Read single
+    ds = readVTK('full/path/to/vtks', sliceType='U_zNormal.80.vtk', t= 133200,
+                 dateref= pd.to_datetime('2010-05-14 12:00:00'), res=10)
+
+    Parameters:
+    -----------
+    vtkpath: str
+        Full path to the directory of time steps containing the VTK files (if reading
+        multiple); or full path to the vtk itself, with extension (if reading single)
+    sliceType: str
+        Common name of the collection of slices to be read. E.g. 'U_zNormal.80.vtk'
+    dateref: datetime
+        Reference time to be specified if datetime output is desired. Default is
+        float/integer output, representing seconds
+    ti, tf, t: int, float, str
+        Time directories that should be read. If a single time is specified, t, then
+        the nearest matching time-directory is read
+    res: int, float, or three-component tuple
+        resolution of the meshgrid onto which the data will be interpolated.
+        If tuple, provide resolution in x, y, and z, in order
+    squash: str; 'x', 'y', or 'z' only
+        Squash the VTK into a plane (useful for terrain slices)
+
+    Returns:
+    --------
+    ds: xr.DataSet
+        Dataset containing the data with x, y [, z, time] as coordinates
+
+
+    written by Regis Thedin (regis.thedin@nrel.gov)
+    """
+
+
+    # Single slice was requested, using `vtkpath='some/path/slice.vtk'`
+    if os.path.isfile(vtkpath):
+        if isinstance(vtkpath, str):
+            if vtkpath.endswith('.vtk'):
+                print(f'Reading a single VTK. For output with `datetime` as coordinate, specify the path, sliceType, and t (see docstrings for example)')
+                x, y, z, out = readSingleVTK(vtkpath, res=res, squash=squash)
+                ds = VTK2xarray(x, y, z, out)
+                return ds
+            else:
+                raise SyntaxError("Single vtk specification using vtkpath='/path/to/file.vtk' should include the extension")
+        else:
+            raise SyntaxError("The vtkpath='path/to/vtks' should be a string.")
+
+
+    # Some checks
+    assert os.path.isdir(vtkpath), f'Directory of VTKs given {vtkpath} is not a directory. For single VTK, see docstrings for example.'
+    if not sliceType.endswith('.vtk'):
+        raise SyntaxError('sliceType should be given with .vtk extension')
+    if ti!=None and tf!=None:
+        if t!=None: raise ValueError('You can specify either t only, or ti and tf, but not all three')
+        if ti>tf:   raise ValueError('tf should be larger than ti')
+        if ti==tf:
+            t = ti
+    elif tf!= None:
+        raise ValueError('If you specify tf, then ti should be specified too')
+    elif ti!= None:
+        raise ValueError('If you specify ti, then tf should be specified too')
+    else:
+        if t==None:
+            raise ValueError("You have to specify at least one time. If a single VTK is needed, use " \
+                             "vtkpath='path/to/file.vtk' ")
+
+    # Get the time directories
+    times_str = sorted(os.listdir(vtkpath))
+    times_float = [float(i) for i in times_str]
+
+    if t is not None:
+        # Single time was requested
+        pos_t = min(range(len(times_float)), key=lambda i: abs(times_float[i]-t))
+        if pos_t == 0 and t < 0.999*times_float[0]:
+            raise ValueError(f'No VTK found for time {t}. The first available VTK is at t={times_float[0]}')
+        if pos_t == len(times_str)-1 and t > 1.001*times_float[-1]:
+            raise ValueError(f'No VTK found for time {t}. The last available VTK is at t={times_float[-1]}')
+        t = times_str[pos_t]
+        print(f'Reading a single VTK for time {float(t)}')
+        x, y, z, out = readSingleVTK(os.path.join(vtkpath,t,sliceType), res=res, squash=squash)
+        ds = VTK2xarray(x, y, z, out, t, dateref)
+        return ds
+
+    # Find limits of the requested subset
+    pos_ti = min(range(len(times_float)), key=lambda i: abs(times_float[i]-ti))
+    pos_tf = min(range(len(times_float)), key=lambda i: abs(times_float[i]-tf))
+    nvtk = pos_tf - pos_ti
+
+    if nvtk == 0:
+        raise ValueError('No VTKs found for the time range specified. VTKs are available ' \
+                         f'between {times_float[0]} and {times_float[-1]}.')
+
+    print(f'Number of VTKs to be read: {nvtk}')
+
+    dslist = []
+    for i, t in enumerate(times_str[pos_ti:pos_tf]):
+        print(f'Iteration {i}: processing time {t}... {100*i/nvtk:.2f}%', end='\r', flush=True)
+        x, y, z, out = readSingleVTK(os.path.join(vtkpath,t,sliceType), res=res, squash=squash)
+        current_ds = VTK2xarray(x, y, z, out, t, dateref)
+        dslist.append(current_ds)
+
+    print('\nDone.')
+
+    # Concatenate on the appropriate dimension
+    if 'time' in dslist[0].dims:
+        ds = xr.concat(dslist, dim='time')
+    elif 'datetime' in dslist[0].dims:
+        ds = xr.concat(dslist, dim='datetime')
+
+    return ds
+
+
+
+def VTK2xarray(x, y, z, out, t=None, dateref=None):
+    """
+    Convert x, y, z, and out from `readSingleVTK` into a dataset.
+    If dateref is provided, then datetime is given in the output
+
+    If the data lie on a plane, then the third direction is omitted and results
+    are given in terms of x and y (even if the data are on the yz plane).
+    If full 3-D data (no squash), then x, y, and z are given
+
+    Parameters:
+    -----------
+    x, y, z: array
+        Coordinate of the grid
+    out: nD-array
+        Values at the grid points. Size depends on the number of components
+    t: int, float, str (optional)
+        time to add to the DataSet as a coordinate
+    dateref: datetime (optional)
+        Reference time to be specified if datetime output is desired. Requires t
+        to be specified as well
+
+    Returns:
+    --------
+    ds: xr.DataSet
+        Dataset containing the data with x, y [, z, time] as coordinates
+
+    written by Regis Thedin (regis.thedin@nrel.gov)
+    """
+
+    if dateref is not None and t is None:
+        raise SyntaxError('If dateref is specified, t should be specified too')
+
+    if len(out) == 3:
+        # vector
+        ds = xr.Dataset({'u':(('x','y','z'), out[0,:]), 'v':(('x','y','z'), out[1,:]), 'w':(('x','y','z'), out[2,:])})
+    elif len(out) == 1:
+        # scalar
+        ds = xr.Dataset({'var':(('x','y','z'), out[0,:])})
+    elif len(out) == 6:
+        # tensor
+        ds = xr.Dataset({'var_xx':(('x','y','z'), out[0,:]), 'var_xy':(('x','y','z'), out[1,:]), 'var_xz':(('x','y','z'), out[2,:]), \
+                         'var_yy':(('x','y','z'), out[3,:]), 'var_yz':(('x','y','z'), out[4,:]), 'var_zz':(('x','y','z'), out[5,:])})
+    else:
+        raise ValueError(f'Read variable with {len(out)} components. Not sure how to follow. Stopping.')
+
+
+    ds = ds.assign_coords({'x':x, 'y':y, 'z':z})
+
+    # If data is on a plane (squash != None), get rid of the third dimension
+    if len(ds.z) == 1:
+        ds = ds.squeeze(dim='z').drop_vars('z')
+
+    if isinstance(t, (str, int, float)):
+        if type(dateref) in (datetime, datetime.datetime, pd.Timestamp):
+            t = pd.to_datetime(float(t), unit='s', origin=dateref).round('0.1S')
+            timename = 'datetime'
+        else:
+            t = float(t)
+            timename = 'time'
+        ds[timename] = t
+        ds = ds.expand_dims(timename).assign_coords({timename:[t]})
+
+    return ds
+
+
+
+def readSingleVTK(vtkfullpath, res=None, squash=None):
+    """
+    Read a single VTK file and interpolate into a uniform grid based on the limits read from the VTK
+
+    If requesting full 3-D field, it may be appropriate to give a tuple (resx, resy, resz) as input in `res`
+
+    For full 3-D field, the function assumes the points are given within a structured grid of
+    either uniform resolution `res` or resolution (res[0],res[1],res[2]) in all directions. It uses
+    `griddata` for interpolation and thus a very large field will take significant time.
+
+    Parameters:
+    -----------
+    vtkfullpath: str
+        full path and filename of the desired VTK file
+    res: int, float, or three-component tuple
+        resolution of interpolated grid. If tuple, resolution in x, y, and z, in order
+    squash: str, 'x', 'y', or 'z' only
+        Squash the VTK into a plane (useful for terrain slices)
+
+    Returns:
+    --------
+    x, y, z:
+        meshgrid of x, y and z positions.
+    out:
+        array of shape (<n components>, <nx>, <ny>, <nz>), where x and y are the long and short
+        direction, respectively. E.g. for Uz, out[2]
+
+    written by Regis Thedin (regis.thedin@nrel.gov)
+    """
+
+
+    # Check if the slice actually exists
+    if not os.path.isfile(vtkfullpath):
+        raise FileNotFoundError(f'Slice {os.path.basename(os.path.dirname(vtkfullpath))}/' \
+                                f'{os.path.basename(vtkfullpath)} does not exist.')
+
+    # Open VTK
+    reader = vtk.vtkPolyDataReader()
+    reader.SetFileName(vtkfullpath)
+    reader.ReadAllVectorsOn()
+    reader.Update()
+
+    # Load data
+    polydata = dsa.WrapDataObject(reader.GetOutput())
+    ptdata = polydata.GetPointData()
+    coords = np.round(polydata.Points, 4)
+    data = ptdata.GetArray(0)
+
+    # Determine what plane the VTK is in
+    if squash == None:
+        # Full 3-D field
+        dir1=0; dir2=1;
+        dirconst=2
+        print('Reading full 3D VTK. It might take a while. If you are reading planes of data, use `squash`.')
+    elif squash=='x':
+        dirconst = 0
+        dir1=1; dir2=2
+    elif squash=='y':
+        dirconst = 1
+        dir1=0; dir2=2
+    elif squash=='z':
+        dirconst = 2
+        dir1=0; dir2=1;
+    else:
+        raise ValueError("Squash is only available for 'x', 'y', or 'z'")
+
+
+    # OpenFOAM has a bug when saving VTK slices using the `type cuttingPlane` option in the dictionary.
+    # The bug results in a VTK where the coordinate points are no longer contained in a single plane.
+    # For the OpenFOAM-buggy VTK slice, a pattern does seem to exist. It is more clearly explained with
+    # an example: Consider a slice at y=4000 and the domain extents are from 0 to 6000 in y. The points
+    # are saved at 3 distinct planes: y=4000, y=6000, and another just under 6000, say 5998. The extra
+    # points that are not where they should be, are at/near the domain boundary _closest_ to the slice
+    # plane location. If the slice was, for example, at y=1000, then the boundary at y=0 will have the
+    # extra points. The only pattern that was identifiable is that 3 unique planes are created and the
+    # wrong ones are side by side. The idea followed here is that we identify the "isolated" plane and
+    # use it to give the coordinate of the actual sampled location. We recover the correct plane with
+    # the commands below.
+    # None of this is crucial to this function as is, since it is not returning any information about
+    # the plane the VTK is at, but rather only using that information for interpolation purposes. This
+    # is left here as a note for future extension of the function.
+    if squash in ['x','y','z']:
+        # For convenience, we use x and y to refer to the dimensions in which the slice varies
+        [xminbox, yminbox] = np.min(coords, 0)[[dir1, dir2]]
+        [xmaxbox, ymaxbox] = np.max(coords, 0)[[dir1, dir2]]
+
+        uniqueCoords = np.unique(coords[:,dirconst])
+        if np.argmax(np.diff(uniqueCoords, prepend=uniqueCoords[0])) == 1:
+            zlevel = uniqueCoords[0]
+        else:
+            zlevel = uniqueCoords[-1]
+        zminbox = zmaxbox = zlevel
+
+        # Squash the data into the requested plane
+        coords[:,dirconst] = zlevel
+
+    else:
+        [xminbox, yminbox, zminbox] = np.min(coords, 0)[[dir1, dir2, dirconst]]
+        [xmaxbox, ymaxbox, zmaxbox] = np.max(coords, 0)[[dir1, dir2, dirconst]]
+
+
+
+    # Get the proper uniform or non-uniform resolution
+    if isinstance(res, (int, float)):
+        dx = dy = dz = res
+    elif isinstance(res, tuple):
+        try: dx=res[0]; dy=res[1]; dz=res[2]
+        except IndexError: print('Tuple must have 3 components'); raise
+    else:
+        raise ValueError('Resolution should be given as either a scalar (uniform '\
+                         'resolution) or a 3-component tuple (resx, resy, resz).' )
+
+
+    # Create the desired output grid
+    x1d = np.arange(xminbox,xmaxbox+dx,dx)
+    y1d = np.arange(yminbox,ymaxbox+dy,dy)
+    if squash in ['x','y','z']:
+        z1d = np.asarray([zlevel])
+    else:
+        z1d = np.arange(zminbox, zmaxbox+dz, dz)
+    [x3d,y3d,z3d] = np.meshgrid(x1d, y1d, z1d, indexing='ij')
+
+    n = np.ravel(x3d).shape[0]
+    coords_want = np.zeros(shape=(n,3))
+    coords_want[:,dir1] = np.ravel(x3d)
+    coords_want[:,dir2] = np.ravel(y3d)
+    coords_want[:,dirconst] = np.ravel(z3d)
+
+    # Interpolate values to given resolution
+    tmp = griddata(coords, data, coords_want, method='nearest')
+
+    if tmp.size/n == 3:
+        # vector
+        out = np.reshape(np.vstack((tmp[:,0],tmp[:,1],tmp[:,2])), (3,x1d.size,y1d.size,z1d.size))
+    elif tmp.size/n == 1:
+        # scalar
+        out=np.reshape(tmp,(1,x1d.size,y1d.size,z1d.size))
+    elif tmp.size/n == 6:
+        # tensor
+        out=np.reshape(np.vstack((tmp[:,0],tmp[:,1],tmp[:,2],tmp[:,3],tmp[:,4],tmp[:,5])), (6,x1d.size,y1d.size,z1d.size))
+    else:
+        # something else
+        raise NotImplementedError(f'Found variable with {tmp.size} components. Not sure how to follow. Stopping.')
+
+    return x1d, y1d, z1d, out
+
+
 def vtk_write_structured_points( f,
                                  datadict,

diff --git a/windtools/openfoam.py b/windtools/openfoam.py
index d32899a..75b5ebf 100644
--- a/windtools/openfoam.py
+++ b/windtools/openfoam.py
@@ -9,6 +9,7 @@
 # CONDITIONS OF ANY KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations under the License.
 
+import os
 import re
 
@@ -46,7 +47,7 @@ class InputFile(dict):
         'table',
     ]
 
-    def __init__(self,fpath,nodef=False):
+    def __init__(self,fpath,nodef=False,include=False):
         """Create a dictionary of definitions from an OpenFOAM-style
         input file.
 
@@ -59,7 +60,12 @@
            vector values to be included from another OpenFOAM file, then
            create a generic 'data' parent object to contain the file
            data.
+        include : bool, optional
+            Read additional input files in place, as specified by '#include'
         """
+        self.fpath = fpath
+        self.nodef = nodef
+        self.include = include
         # read full file
         with open(fpath) as f:
             lines = f.readlines()
@@ -68,7 +74,9 @@
         # trim single-line comments and remove directives
         for i,line in enumerate(lines):
             line = line.strip()
-            if line.startswith('#'):
+            if include and line.startswith('#include'):
+                lines[i] = line
+            elif line.startswith('#'):
                 if self.DEBUG:
                     print('Ignoring directive:',line)
                 lines[i] = ''
@@ -159,7 +167,11 @@ def _split_defs(self,txt):
             assert (idx > 0), 'problem parsing '+string+' field'
             string = txt[:idx].strip()
 
-            if string.endswith(';'):
+            if name == '#include':
+                names.append(name)
+                lines.append(string)
+                container.append(None)
+            elif string.endswith(';'):
                 # found single definition
                 if self.DEBUG:
                     print('value=',string[:-1])
                 names.append(name)
@@ -219,7 +231,20 @@ def _parse(self,name,defn,containertype,parent=None):
             if containertype is not None:
                 print('container type:',containertype)
         defn = defn.strip()
-        if containertype is None:
+        if name == '#include':
+            includedfile = os.path.join(os.path.split(self.fpath)[0],
+                                        defn.strip('"'))
+            if self.DEBUG:
+                print('PARSING included file',includedfile)
+            tmp = InputFile(includedfile)
+            for key,val in tmp.items():
+                if parent is None:
+                    # parent is the InputFile object
+                    self.__setitem__(key,val)
+                else:
+                    assert isinstance(parent, dict)
+                    parent[key] = val
+        elif containertype is None:
             # set single value in parent
             defn = self._try_cast(defn)
             # SET VALUE HERE

From f6238b80f9c3cf0cfe6f057b12427a68a60a0905 Mon Sep 17 00:00:00 2001
From: Eliot Quon <eliot.quon@nrel.gov>
Date: Fri, 24 Mar 2023 14:31:48 -0600
Subject: [PATCH 3/4] Move windtools subtree to top level...

Importing windtools modules directly from mmctools still works, e.g.:

    from mmctools.plotting import <foo>

will import windtools.plotting.<foo> and serves as a useful shorthand.
_For clarity_, the windtools modules can also be accessed from mmctools,
but without the awkward `from mmctools.windtools.windtools.foo` syntax
previously required.
Now:

    from mmctools.windtools.foo import bar

will import windtools.foo.bar
---
 mmctools/__init__.py                                            | 2 +-
 {mmctools/windtools => windtools}/.gitignore                    | 0
 {mmctools/windtools => windtools}/LICENSE                       | 0
 {mmctools/windtools => windtools}/README.md                     | 0
 .../windtools => windtools}/bin/sowfa_convert_source_history.py | 0
 .../bin/sowfa_extract_elevation_from_stl.py                     | 0
 {mmctools/windtools => windtools}/bin/sowfa_log_to_df.py        | 0
 {mmctools/windtools => windtools}/bin/sowfa_time_left.py        | 0
 {mmctools/windtools => windtools}/setup.py                      | 0
 .../windtools/SOWFA6/constant/boundaryData.py                   | 0
 {mmctools/windtools => windtools}/windtools/SOWFA6/log.py       | 0
 .../windtools/SOWFA6/postProcessing/averaging.py                | 0
 .../windtools/SOWFA6/postProcessing/probeSets.py                | 0
 .../windtools/SOWFA6/postProcessing/probes.py                   | 0
 .../windtools/SOWFA6/postProcessing/reader.py                   | 0
 .../windtools/SOWFA6/postProcessing/sourceHistory.py            | 0
 {mmctools/windtools => windtools}/windtools/common.py           | 0
 {mmctools/windtools => windtools}/windtools/inflow/general.py   | 0
 {mmctools/windtools => windtools}/windtools/inflow/synthetic.py | 0
 {mmctools/windtools => windtools}/windtools/io/binary.py        | 0
 {mmctools/windtools => windtools}/windtools/io/ensight.py       | 0
 {mmctools/windtools => windtools}/windtools/io/series.py        | 0
 {mmctools/windtools => windtools}/windtools/io/vtk.py           | 0
 {mmctools/windtools => windtools}/windtools/openfast.py         | 0
 {mmctools/windtools => windtools}/windtools/openfoam.py         | 0
 {mmctools/windtools => windtools}/windtools/plotting.py         | 0
 26 files changed, 1 insertion(+), 1 deletion(-)
 rename {mmctools/windtools => windtools}/.gitignore (100%)
 rename {mmctools/windtools => windtools}/LICENSE (100%)
 rename {mmctools/windtools => windtools}/README.md (100%)
 rename {mmctools/windtools => windtools}/bin/sowfa_convert_source_history.py (100%)
 rename {mmctools/windtools => windtools}/bin/sowfa_extract_elevation_from_stl.py (100%)
 rename {mmctools/windtools => windtools}/bin/sowfa_log_to_df.py (100%)
 rename {mmctools/windtools => windtools}/bin/sowfa_time_left.py (100%)
 rename {mmctools/windtools => windtools}/setup.py (100%)
 rename {mmctools/windtools => windtools}/windtools/SOWFA6/constant/boundaryData.py (100%)
 rename {mmctools/windtools => windtools}/windtools/SOWFA6/log.py (100%)
 rename {mmctools/windtools => windtools}/windtools/SOWFA6/postProcessing/averaging.py (100%)
 rename {mmctools/windtools => windtools}/windtools/SOWFA6/postProcessing/probeSets.py (100%)
 rename {mmctools/windtools => windtools}/windtools/SOWFA6/postProcessing/probes.py (100%)
 rename {mmctools/windtools => windtools}/windtools/SOWFA6/postProcessing/reader.py (100%)
 rename {mmctools/windtools => windtools}/windtools/SOWFA6/postProcessing/sourceHistory.py (100%)
 rename {mmctools/windtools => windtools}/windtools/common.py (100%)
 rename {mmctools/windtools => windtools}/windtools/inflow/general.py (100%)
 rename {mmctools/windtools => windtools}/windtools/inflow/synthetic.py (100%)
 rename {mmctools/windtools => windtools}/windtools/io/binary.py (100%)
 rename {mmctools/windtools => windtools}/windtools/io/ensight.py (100%)
 rename {mmctools/windtools => windtools}/windtools/io/series.py (100%)
 rename {mmctools/windtools => windtools}/windtools/io/vtk.py (100%)
 rename {mmctools/windtools => windtools}/windtools/openfast.py (100%)
 rename {mmctools/windtools => windtools}/windtools/openfoam.py (100%)
 rename {mmctools/windtools => windtools}/windtools/plotting.py (100%)

diff --git a/mmctools/__init__.py b/mmctools/__init__.py
index 37e1830..d6a0026 100644
--- a/mmctools/__init__.py
+++ b/mmctools/__init__.py
@@ -20,5 +20,5 @@
 # enable `from mmctools.foo.bar import baz
 # to import baz from windtools.foo.bar`
 import os
 mmctools_module = os.path.split(__file__)[0]
-__path__.append(os.path.join(mmctools_module,'windtools','windtools'))
+__path__.append(os.path.join(mmctools_module,'..','windtools','windtools'))
diff --git a/mmctools/windtools/.gitignore b/windtools/.gitignore
similarity index 100%
rename from mmctools/windtools/.gitignore
rename to windtools/.gitignore
diff --git a/mmctools/windtools/LICENSE b/windtools/LICENSE
similarity index 100%
rename from mmctools/windtools/LICENSE
rename to windtools/LICENSE
diff --git a/mmctools/windtools/README.md b/windtools/README.md
similarity index 100%
rename from mmctools/windtools/README.md
rename to windtools/README.md
diff --git a/mmctools/windtools/bin/sowfa_convert_source_history.py b/windtools/bin/sowfa_convert_source_history.py
similarity index 100%
rename from mmctools/windtools/bin/sowfa_convert_source_history.py
rename to windtools/bin/sowfa_convert_source_history.py
diff --git a/mmctools/windtools/bin/sowfa_extract_elevation_from_stl.py b/windtools/bin/sowfa_extract_elevation_from_stl.py
similarity index 100%
rename from mmctools/windtools/bin/sowfa_extract_elevation_from_stl.py
rename to windtools/bin/sowfa_extract_elevation_from_stl.py
diff --git a/mmctools/windtools/bin/sowfa_log_to_df.py b/windtools/bin/sowfa_log_to_df.py
similarity index 100%
rename from mmctools/windtools/bin/sowfa_log_to_df.py
rename to windtools/bin/sowfa_log_to_df.py
diff --git a/mmctools/windtools/bin/sowfa_time_left.py b/windtools/bin/sowfa_time_left.py
similarity index 100%
rename from mmctools/windtools/bin/sowfa_time_left.py
rename to windtools/bin/sowfa_time_left.py
diff --git a/mmctools/windtools/setup.py b/windtools/setup.py
similarity index 100%
rename from mmctools/windtools/setup.py
rename to windtools/setup.py
diff --git a/mmctools/windtools/windtools/SOWFA6/constant/boundaryData.py b/windtools/windtools/SOWFA6/constant/boundaryData.py
similarity index 100%
rename from mmctools/windtools/windtools/SOWFA6/constant/boundaryData.py
rename to windtools/windtools/SOWFA6/constant/boundaryData.py
diff --git a/mmctools/windtools/windtools/SOWFA6/log.py b/windtools/windtools/SOWFA6/log.py
similarity index 100%
rename from mmctools/windtools/windtools/SOWFA6/log.py
rename to windtools/windtools/SOWFA6/log.py
diff --git a/mmctools/windtools/windtools/SOWFA6/postProcessing/averaging.py b/windtools/windtools/SOWFA6/postProcessing/averaging.py
similarity index 100%
rename from mmctools/windtools/windtools/SOWFA6/postProcessing/averaging.py
rename to windtools/windtools/SOWFA6/postProcessing/averaging.py
diff --git a/mmctools/windtools/windtools/SOWFA6/postProcessing/probeSets.py b/windtools/windtools/SOWFA6/postProcessing/probeSets.py
similarity index 100%
rename from mmctools/windtools/windtools/SOWFA6/postProcessing/probeSets.py
rename to windtools/windtools/SOWFA6/postProcessing/probeSets.py
diff --git a/mmctools/windtools/windtools/SOWFA6/postProcessing/probes.py b/windtools/windtools/SOWFA6/postProcessing/probes.py
similarity index 100%
rename from mmctools/windtools/windtools/SOWFA6/postProcessing/probes.py
rename to windtools/windtools/SOWFA6/postProcessing/probes.py
diff --git a/mmctools/windtools/windtools/SOWFA6/postProcessing/reader.py b/windtools/windtools/SOWFA6/postProcessing/reader.py
similarity index 100%
rename from mmctools/windtools/windtools/SOWFA6/postProcessing/reader.py
rename to windtools/windtools/SOWFA6/postProcessing/reader.py
diff --git a/mmctools/windtools/windtools/SOWFA6/postProcessing/sourceHistory.py b/windtools/windtools/SOWFA6/postProcessing/sourceHistory.py
similarity index 100%
rename from mmctools/windtools/windtools/SOWFA6/postProcessing/sourceHistory.py
rename to windtools/windtools/SOWFA6/postProcessing/sourceHistory.py
diff --git a/mmctools/windtools/windtools/common.py b/windtools/windtools/common.py
similarity index 100%
rename from mmctools/windtools/windtools/common.py
rename to windtools/windtools/common.py
diff --git a/mmctools/windtools/windtools/inflow/general.py b/windtools/windtools/inflow/general.py
similarity index 100%
rename from mmctools/windtools/windtools/inflow/general.py
rename to windtools/windtools/inflow/general.py
diff --git a/mmctools/windtools/windtools/inflow/synthetic.py b/windtools/windtools/inflow/synthetic.py
similarity index 100%
rename from mmctools/windtools/windtools/inflow/synthetic.py
rename to windtools/windtools/inflow/synthetic.py
diff --git a/mmctools/windtools/windtools/io/binary.py b/windtools/windtools/io/binary.py
similarity index 100%
rename from mmctools/windtools/windtools/io/binary.py
rename to windtools/windtools/io/binary.py
diff --git a/mmctools/windtools/windtools/io/ensight.py b/windtools/windtools/io/ensight.py
similarity index 100%
rename from mmctools/windtools/windtools/io/ensight.py
rename to windtools/windtools/io/ensight.py
diff --git a/mmctools/windtools/windtools/io/series.py b/windtools/windtools/io/series.py
similarity index 100%
rename from mmctools/windtools/windtools/io/series.py
rename to windtools/windtools/io/series.py
diff --git a/mmctools/windtools/windtools/io/vtk.py b/windtools/windtools/io/vtk.py
similarity index 100%
rename from mmctools/windtools/windtools/io/vtk.py
rename to windtools/windtools/io/vtk.py
diff --git a/mmctools/windtools/windtools/openfast.py b/windtools/windtools/openfast.py
similarity index 100%
rename from mmctools/windtools/windtools/openfast.py
rename to windtools/windtools/openfast.py
diff --git a/mmctools/windtools/windtools/openfoam.py b/windtools/windtools/openfoam.py
similarity index 100%
rename from mmctools/windtools/windtools/openfoam.py
rename to windtools/windtools/openfoam.py
diff --git a/mmctools/windtools/windtools/plotting.py b/windtools/windtools/plotting.py
similarity index 100%
rename from mmctools/windtools/windtools/plotting.py
rename to windtools/windtools/plotting.py

From 8d95ceff14fb524399ed101b879a9e8c1ca9a4df Mon Sep 17 00:00:00 2001
From: Eliot Quon <eliot.quon@nrel.gov>
Date: Fri, 24 Mar 2023 14:42:07 -0600
Subject: [PATCH 4/4] Update readme

---
 README.md | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/README.md b/README.md
index e352225..e88a23d 100644
--- a/README.md
+++ b/README.md
@@ -106,6 +106,13 @@ cd /path/to/a2e-mmc/mmctools
 pip install -e .
 ```
 
+### `windtools`
+
+This repository includes a subtree from https://github.com/NREL/windtools, which
+provides an essential plotting library (importable as `from mmctools.plotting
+import ...`) as well as other simulation post-processing helper modules.
+
+
 ## Code Development Principles
 
 - All code should be usable in, and demonstrated by, Jupyter notebooks.
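
A minimal usage sketch of the import shorthand enabled by PATCH 3 and documented in the README hunk above. The function name `plot_timehistory` is an assumption for illustration (substitute any name actually exported by `windtools/windtools/plotting.py`); the equivalence itself comes from the `__path__` append in `mmctools/__init__.py`:

```python
# After `pip install -e .`, both imports are expected to resolve to the same
# module, since mmctools.__path__ also searches ../windtools/windtools/:
from windtools.plotting import plot_timehistory   # direct import from the subtree
from mmctools.plotting import plot_timehistory    # shorthand via mmctools.__path__
```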