diff --git a/README.md b/README.md index e352225..e88a23d 100644 --- a/README.md +++ b/README.md @@ -106,6 +106,13 @@ cd /path/to/a2e-mmc/mmctools pip install -e . ``` +### `windtools` + +This repository includes a subtree from https://github.com/NREL/windtools, which +provides an essential plotting library (importable as `from mmctools.plotting +import ...`) as well as other simulation post-processing helper modules. + + ## Code Development Principles - All code should be usable in, and demonstrated by, Jupyter notebooks. diff --git a/mmctools/__init__.py b/mmctools/__init__.py index 37e1830..d6a0026 100644 --- a/mmctools/__init__.py +++ b/mmctools/__init__.py @@ -20,5 +20,5 @@ # enable `from mmctools.foo.bar import baz # to import baz from windtools.foo.bar` import os mmctools_module = os.path.split(__file__)[0] -__path__.append(os.path.join(mmctools_module,'windtools','windtools')) +__path__.append(os.path.join(mmctools_module,'..','windtools','windtools')) diff --git a/mmctools/windtools/windtools/io/ensight.py b/mmctools/windtools/windtools/io/ensight.py deleted file mode 100644 index f5faa0d..0000000 --- a/mmctools/windtools/windtools/io/ensight.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2019 NREL - -# Licensed under the Apache License, Version 2.0 (the "License"); you may not use -# this file except in compliance with the License. You may obtain a copy of the -# License at http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software distributed -# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -# CONDITIONS OF ANY KIND, either express or implied. See the License for the -# specific language governing permissions and limitations under the License. - -import os -import pandas as pd - - -def read_mesh(fpath,headerlength=8,chunksize=None): - """Read Ensight mesh file (ascii) into a dataframe""" - with open(fpath,'r') as f: - for _ in range(headerlength): - f.readline() - N = int(f.readline()) - if chunksize is None: - mesh = pd.read_csv(f,header=None,nrows=3*N).values - else: - mesh = pd.concat(pd.read_csv(f,header=None,nrows=3*N,chunksize=chunksize)).values - df = pd.DataFrame(data=mesh.reshape((N,3),order='F'), columns=['x','y','z']) - return df - - -def read_vector(fpath,mesh,t=0.0,headerlength=4,chunksize=None): - """Read Ensight data array (ascii) into a dataframe with combined mesh - information corresponding to the specified time; mesh should be read in by - the read_mesh() function - """ - Npts = len(mesh) - with open(fpath,'r') as f: - for _ in range(headerlength): - f.readline() - if chunksize is None: - vals = pd.read_csv(f,header=None,nrows=3*Npts).values - else: - vals = pd.concat(pd.read_csv(f,header=None,nrows=3*Npts,chunksize=chunksize)).values - df = mesh.copy() - df['t'] = t - uvw = pd.DataFrame(data=vals.reshape((Npts,3),order='F'), columns=['u','v','w']) - df = pd.concat([df,uvw], axis=1) - return df.set_index(['t','x','y','z']) diff --git a/mmctools/windtools/windtools/io/vtk.py b/mmctools/windtools/windtools/io/vtk.py deleted file mode 100644 index eaefc77..0000000 --- a/mmctools/windtools/windtools/io/vtk.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright 2019 NREL - -# Licensed under the Apache License, Version 2.0 (the "License"); you may not use -# this file except in compliance with the License. 
You may obtain a copy of the -# License at http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software distributed -# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -# CONDITIONS OF ANY KIND, either express or implied. See the License for the -# specific language governing permissions and limitations under the License. - -""" -Functions to deal with simple VTK I/O. -""" -import struct - -def vtk_write_structured_points( f, - datadict, - ds=None,dx=None,dy=None,dz=None, - origin=(0.0,0.0,0.0), - indexorder='ijk', - vtk_header='# vtk DataFile Version 2.0', - vtk_datatype='float', - vtk_description='really cool data' - ): - """Write a VTK dataset with regular topology to file handle 'f' - written by Eliot Quon (eliot.quon@nrel.gov) - - Inputs are written with x increasing fastest, then y, then z. - - Example: Writing out two vector fields in one VTK file. - ``` - from windtools.io.vtk import vtk_write_structured_points - with open('some_data.vtk','wb') as f: - vtk_write_structured_points(f, - {'vel':np.stack((u,v,w))}, - ds=1.0, - indexorder='ijk') - ``` - - Parameters - ---------- - datadict : dict - Dictionary with keys as the data names. Data are either scalar - fields with shape (nx,ny,nz) or vector fields with shape - (3,nx,ny,nz). - ds : float, optional - Default grid spacing; dx,dy,dz may be specified to override - dx, dy, dz : float, optional - Specific grid spacings; if ds is not specified, then all three - must be specified - origin : list-like, optional - Origin of the grid - indexorder: str - Specify the indexing convention (standard: 'ijk', TTUDD: 'jik') - - @author: ewquon - """ - # calculate grid spacings if needed - if ds: - if not dx: dx = ds - if not dy: dy = ds - if not dz: dz = ds - else: - assert( dx > 0 and dy > 0 and dz > 0 ) - - # check data - nx = ny = nz = None - datatype = {} - for name,data in datadict.items(): - dims = data.shape - if len(dims) == 3: - datatype[name] = 'scalar' - elif len(dims) == 4: - assert dims[0] == 3 - datatype[name] = 'vector' - else: - raise ValueError('Unexpected "'+name+'" array shape: '+str(data.shape)) - if nx is None: - nx = dims[-3] - ny = dims[-2] - nz = dims[-1] - else: - assert (nx==dims[-3]) and (ny==dims[-2]) and (nz==dims[-1]) - - # write header - if 'b' in f.mode: - binary = True - import struct - if bytes is str: - # python 2 - def b(s): - return str(s) - else: - # python 3 - def b(s): - return bytes(s,'utf-8') - f.write(b(vtk_header+'\n')) - f.write(b(vtk_description+'\n')) - f.write(b('BINARY\n')) - f.write(b('DATASET STRUCTURED_POINTS\n')) - - # write out mesh descriptors - f.write(b('DIMENSIONS {:d} {:d} {:d}\n'.format(nx,ny,nz))) - f.write(b('ORIGIN {:f} {:f} {:f}\n'.format(origin[0],origin[1],origin[2]))) - f.write(b('SPACING {:f} {:f} {:f}\n'.format(dx,dy,dz))) - - # write out data - f.write(b('POINT_DATA {:d}\n'.format(nx*ny*nz))) - - else: - binary = False - f.write(vtk_header+'\n') - f.write(vtk_description+'\n') - f.write('ASCII\n') - f.write('DATASET STRUCTURED_POINTS\n') - - # write out mesh descriptors - f.write('DIMENSIONS {:d} {:d} {:d}\n'.format(nx,ny,nz)) - f.write('ORIGIN {:f} {:f} {:f}\n'.format(origin[0],origin[1],origin[2])) - f.write('SPACING {:f} {:f} {:f}\n'.format(dx,dy,dz)) - - # write out data - f.write('POINT_DATA {:d}\n'.format(nx*ny*nz)) - - for name,data in datadict.items(): - outputtype = datatype[name] - if outputtype=='vector': - u = data[0,:,:,:] - v = data[1,:,:,:] - w = data[2,:,:,:] - elif 
outputtype=='scalar': - u = data - else: - raise ValueError('Unexpected data type '+outputtype) - - name = name.replace(' ','_') - - mapping = { 'i': range(nx), 'j': range(ny), 'k': range(nz) } - ijkranges = [ mapping[ijk] for ijk in indexorder ] - - if outputtype=='vector': - if binary: - f.write(b('{:s}S {:s} {:s}\n'.format(outputtype.upper(),name,vtk_datatype))) - for k in ijkranges[2]: - for j in ijkranges[1]: - for i in ijkranges[0]: - f.write(struct.pack('>fff', u[i,j,k], v[i,j,k], w[i,j,k])) # big endian - else: #ascii - f.write('{:s}S {:s} {:s}\n'.format(outputtype.upper(),name,vtk_datatype)) - for k in ijkranges[2]: - for j in ijkranges[1]: - for i in ijkranges[0]: - f.write(' {:f} {:f} {:f}\n'.format(u[i,j,k], v[i,j,k], w[i,j,k])) - elif outputtype=='scalar': - if binary: - f.write(b('{:s}S {:s} {:s}\n'.format(outputtype.upper(),name,vtk_datatype))) - f.write(b('LOOKUP_TABLE default\n')) - for k in ijkranges[2]: - for j in ijkranges[1]: - for i in ijkranges[0]: - #f.write(struct.pack('f',u[j,i,k])) # native endianness - f.write(struct.pack('>f',u[i,j,k])) # big endian - else: - f.write('{:s}S {:s} {:s}\n'.format(outputtype.upper(),name,vtk_datatype)) - f.write('LOOKUP_TABLE default\n') - for k in ijkranges[2]: - for j in ijkranges[1]: - for i in ijkranges[0]: - f.write(' {:f}\n'.format(u[i,j,k])) - diff --git a/setup.py b/setup.py index 5d5f4ab..ada7f74 100644 --- a/setup.py +++ b/setup.py @@ -28,7 +28,7 @@ URL = 'https://github.com/a2e-mmc/mmctools' EMAIL = 'eliot.quon@nrel.gov' AUTHOR = 'U.S. Department of Energy' -REQUIRES_PYTHON = '>=3.10.6' +REQUIRES_PYTHON = '>=3.10.5' VERSION = '0.1.1' # What packages are required for this module to be executed? diff --git a/mmctools/windtools/.gitignore b/windtools/.gitignore similarity index 100% rename from mmctools/windtools/.gitignore rename to windtools/.gitignore diff --git a/mmctools/windtools/LICENSE b/windtools/LICENSE similarity index 100% rename from mmctools/windtools/LICENSE rename to windtools/LICENSE diff --git a/mmctools/windtools/README.md b/windtools/README.md similarity index 100% rename from mmctools/windtools/README.md rename to windtools/README.md diff --git a/mmctools/windtools/bin/sowfa_convert_source_history.py b/windtools/bin/sowfa_convert_source_history.py similarity index 100% rename from mmctools/windtools/bin/sowfa_convert_source_history.py rename to windtools/bin/sowfa_convert_source_history.py diff --git a/mmctools/windtools/bin/sowfa_extract_elevation_from_stl.py b/windtools/bin/sowfa_extract_elevation_from_stl.py similarity index 100% rename from mmctools/windtools/bin/sowfa_extract_elevation_from_stl.py rename to windtools/bin/sowfa_extract_elevation_from_stl.py diff --git a/mmctools/windtools/bin/sowfa_log_to_df.py b/windtools/bin/sowfa_log_to_df.py similarity index 100% rename from mmctools/windtools/bin/sowfa_log_to_df.py rename to windtools/bin/sowfa_log_to_df.py diff --git a/windtools/bin/sowfa_time_left.py b/windtools/bin/sowfa_time_left.py new file mode 100755 index 0000000..c3f0887 --- /dev/null +++ b/windtools/bin/sowfa_time_left.py @@ -0,0 +1,159 @@ +# Copyright 2021 NREL + +# Licensed under the Apache License, Version 2.0 (the "License"); you may not use +# this file except in compliance with the License. 
You may obtain a copy of the
+# License at http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software distributed
+# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+# CONDITIONS OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+
+#
+# Usage
+# -----
+#   sowfa_time_left.py                      # to scrape timing from latest log.*.${application}
+#   sowfa_time_left.py /path/to/solver.log  # to scrape timing from specific file
+#
+# Options
+# -------
+#   -plot  : plot timing history
+#   -write : write timing history
+#
+
+import sys
+import os
+import glob
+import time
+import numpy as np
+
+log_est_total_time = 'estimated_total_time'
+
+dpath = '.'
+
+makeplots = False
+if '-plot' in sys.argv:
+    makeplots = True
+    import matplotlib.pyplot as plt
+    sys.argv.remove('-plot')
+
+writehist = False
+if '-write' in sys.argv:
+    writehist = True
+    sys.argv.remove('-write')
+
+if len(sys.argv) > 1:
+    fpath = sys.argv[1]
+    dpath = os.path.split(fpath)[0]
+
+with open(os.path.join(dpath,'system','controlDict'),'r') as f:
+    for line in f:
+        if line.strip().startswith('application'):
+            app = line.split()[1].split(';')[0]
+        elif line.strip().startswith('deltaT'):
+            dt = float(line.split()[1][:-1])
+        elif line.strip().startswith('endTime'):
+            endTime = float(line.split()[1][:-1])
+
+if len(sys.argv) == 1:
+    logfiles = glob.glob('log.*'+app)
+    logfiles.sort()
+    fpath = logfiles[-1]
+
+nsteps = 0
+startTime = -1
+simTimes = []
+clockTimes = []
+CourantMax = []
+try:
+    with open(fpath,'r') as f:
+        for line in f:
+            if line.startswith('Create mesh'):
+                startTime = float(line.split()[-1])
+                if startTime > 0:
+                    print('Detected restart from t =',startTime)
+            elif line.startswith('Time ='):
+                curTime = float(line.split()[2])
+                simTimes.append(curTime)
+                nsteps += 1
+            elif line.startswith('ExecutionTime ='):
+                elapsedTime = float(line.split()[6])
+                clockTimes.append(elapsedTime)
+            elif line.startswith('Courant Number'):
+                cflmax = float(line.split()[-1])
+                CourantMax.append(cflmax)
+except NameError:
+    sys.exit('USAGE: '+sys.argv[0]+' log_file')
+except IOError:
+    sys.exit('Problem reading '+fpath)
+
+completed = (curTime-startTime) / (endTime-startTime)
+
+def disptime(t,
+             threshold=100.0,
+             timeunits=['s','min','hrs','days'],
+             timeconvert=[60.,60.,24.]):
+    i = 0
+    t *= 1.0 # convert to float
+    while t > threshold and i < len(timeunits)-1:
+        t /= timeconvert[i]
+        i += 1
+    return '%.1f %s' % (t,timeunits[i]) # old format, for compatibility
+
+print('Simulation is currently at t = {:.1f} and will end at {:.1f} ({:.1f}% complete)'.format(
+    curTime, endTime, 100*completed))
+print(app,'has been running for',nsteps,'steps')
+print('Elapsed time:',disptime(elapsedTime))
+
+timestep_size = np.diff(np.array(simTimes))
+ctime_per_step = np.diff(np.array(clockTimes))
+print('Average/min/max time per step', \
+      np.mean(ctime_per_step), np.min(ctime_per_step), np.max(ctime_per_step))
+
+totalTime = elapsedTime / completed
+print('ESTIMATED TOTAL TIME:',disptime(totalTime))
+if log_est_total_time:
+    try:
+        with open(os.path.join(dpath,log_est_total_time),'a') as f:
+            f.write(time.strftime('%x %X\tsimulated t={:.1f}s, N={:d}\tESTIMATED TOTAL TIME: {:s}\n'.format(curTime,nsteps,disptime(totalTime))))
+    except IOError:
+        pass
+
+remainingTime = totalTime - elapsedTime
+print('Remaining time:',disptime(remainingTime))
+
+timeStepsLeft = (endTime-curTime) / 
timestep_size[-1] +remainingTimeOpt = timeStepsLeft * ctime_per_step[-1] +print('Remaining time (based on clocktime for last timestep):',disptime(remainingTimeOpt)) +print(' ',timeStepsLeft,'time steps left') +print(' ~',ctime_per_step[-1],'clock time per step') + +print('Average/min/max maximum Courant number per step', \ + np.mean(CourantMax), np.min(CourantMax), np.max(CourantMax)) + +print(time.strftime('Current date/time is %x %X')) + +if writehist: + N = min(len(simTimes),len(ctime_per_step)) + with open(fpath+'.timing','w') as f: + f.write(time.strftime('# Current system time: %x %X\n')) + f.write('# step simulation_time wallclock_time_per_step\n') + for i in range(N): + f.write('{:d} {:f} {:f}\n'.format(i+1,simTimes[i],ctime_per_step[i])) + +if makeplots: + plt.figure() + plt.plot(ctime_per_step) + plt.xlabel('iteration') + plt.ylabel('Clock time / step [s]') + + if (np.max(timestep_size)-np.min(timestep_size)) > 1e-6: + plt.figure() + plt.plot(timestep_size) + plt.xlabel('iteration') + plt.ylabel('simulated timestep [s]') + else: + print('Skipping timestep plot for constant dt=',np.mean(timestep_size)) + + plt.show() diff --git a/mmctools/windtools/setup.py b/windtools/setup.py similarity index 100% rename from mmctools/windtools/setup.py rename to windtools/setup.py diff --git a/mmctools/windtools/windtools/SOWFA6/constant/boundaryData.py b/windtools/windtools/SOWFA6/constant/boundaryData.py similarity index 100% rename from mmctools/windtools/windtools/SOWFA6/constant/boundaryData.py rename to windtools/windtools/SOWFA6/constant/boundaryData.py diff --git a/mmctools/windtools/windtools/SOWFA6/log.py b/windtools/windtools/SOWFA6/log.py similarity index 100% rename from mmctools/windtools/windtools/SOWFA6/log.py rename to windtools/windtools/SOWFA6/log.py diff --git a/mmctools/windtools/windtools/SOWFA6/postProcessing/averaging.py b/windtools/windtools/SOWFA6/postProcessing/averaging.py similarity index 100% rename from mmctools/windtools/windtools/SOWFA6/postProcessing/averaging.py rename to windtools/windtools/SOWFA6/postProcessing/averaging.py diff --git a/mmctools/windtools/windtools/SOWFA6/postProcessing/probeSets.py b/windtools/windtools/SOWFA6/postProcessing/probeSets.py similarity index 68% rename from mmctools/windtools/windtools/SOWFA6/postProcessing/probeSets.py rename to windtools/windtools/SOWFA6/postProcessing/probeSets.py index a56818c..29f7aea 100644 --- a/mmctools/windtools/windtools/SOWFA6/postProcessing/probeSets.py +++ b/windtools/windtools/SOWFA6/postProcessing/probeSets.py @@ -16,7 +16,7 @@ """ from __future__ import print_function -import os +import os, glob import pandas as pd import numpy as np from .reader import Reader @@ -106,7 +106,8 @@ def __init__(self, dpath=None, tstart=None, tend=None, varList='all', posPert=0. 
self.tstart = tstart
         self.tend = tend
         self.varList = varList
-        self._allVars = {'U','UMean','T','TMean','TPrimeUPrimeMean','UPrime2Mean','p_rgh'}
+        self._allVars = {'U','T','p_rgh','UMean','TMean','UPrime2Mean','TPrimeUPrimeMean'}
+        self._printzagl = False
         super().__init__(dpath,includeDt=True,**kwargs)
 
@@ -147,7 +148,10 @@ def _processdirs(self, tdirList, trimOverlap=False, **kwargs):
         for prefix in fprefix:
             for param in fparam:
                 for suffix in fsuffix:
-                    fileList.append( prefix + param + '_' + var + suffix )
+                    try:
+                        fileList.append( prefix + param + '_' + var + suffix )
+                    except TypeError:
+                        raise ValueError('Specify fprefix, fparam, varList, and fsuffix (check spelling)')
         outputs = fileList
 
         # Get list of times and trim the data
@@ -165,26 +169,32 @@ def _processdirs(self, tdirList, trimOverlap=False, **kwargs):
         if not tdirList:
             raise ValueError('No time directories found')
 
-        # Process all data
+        # Process all data. Loop over files rather than variables; each file,
+        # however, contains a single variable.
         for field in outputs:
-            arrays = [ self._read_data( tdir,field ) for tdir in tdirList ]
+            # parse the file name to determine the variable it contains
+            param, var = self._parseProbeName(field)
+            # Read the file that contains the variable, getting only the desired var
+            arrays = [ self._read_data( tdir, field, param, var ) for tdir in tdirList ]
             # combine into a single array and trim end of time series
            arrays = np.concatenate(arrays)[:self.imax,:]
-            # parse the name to create the right variable
-            param, var = self._parseProbeName(field)
-            # add the zagl to the array
-            arrays = np.hstack((arrays[:,:4], \
-                                np.full((arrays.shape[0],1),param), \
-                                arrays[:,4:]))
-            # append to (or create) a variable attribute
+
+            # if `param` is an integer, treat it as zagl and add it to the array;
+            # otherwise, skip it
+            if isinstance(param, int):
+                print('Param is an integer. Assuming it means zagl and adding it')
+                # add the zagl to the array
+                arrays = np.hstack((arrays[:,:4], \
+                                    np.full((arrays.shape[0],1),param), \
+                                    arrays[:,4:]))
+                self._printzagl = True
+
             try:
                 setattr(self,var,np.concatenate((getattr(self,var),arrays)))
             except AttributeError:
                 setattr( self, var, arrays )
-
-            if not var in self._processed:
+
+            if not var in self._processed:
                 self._processed.append(var)
-            print('  read',field)
+            print(f'  read {self.fprefix}{param}_*{self.fsuffix}, variable {var}')
 
         self.t = np.unique(arrays[:,0])
         self.Nt = len(self.t)
 
@@ -198,24 +208,82 @@
 
     def _parseProbeName(self, field):
-        # Example: get 'vmasts_50mGrid_h30_T.xy' and return param=30, var='T'
-        # Remove the prefix from the full field name
-        f = field.replace(self.fprefix,'')
-        # Substitude the first underscore with a dot and split array
-        f = f.replace('_','.',1).split('.')
-        for i in set(f).intersection(self._allVars):
-            var = i
-        param = int(f[-3])
+        # This function will only receive `field` with one variable, even if
+        # that file does not exist.
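+        # (Naming convention assumed by the parsing below, following the
+        # fileList construction in _processdirs above: probe files are named
+        # <fprefix><param>_<var><fsuffix>.)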
+        # Examples: gets 'vmasts_50mGrid_h30_T.xy' and returns param=30, var='T'
+        #           gets 'vmasts_m1_1_U.xy' and returns param='m1_1', var='U'
+        #           it will never get 'vmasts_m1_2_T_p_rgh'
+
+        # Remove the prefix and suffix from the full field name
+        f = field.replace(self.fprefix,'').replace(self.fsuffix,'')
+
+        # Find the variable that is in the field name
+        for v in self._allVars:
+            if v in f:
+                var = v
+                # remove the variable from field, along with the preceding underscore
+                f = f.replace('_'+v, '')
+                break
+
+        # what is left is the param
+        if f.isdigit(): param = int(f)
+        else:           param = f
+
         return param, var
+
+    def _getFileContainingVar(self, dpath, param, var):
+        # Example: gets 'vmast_50_T.xy' and returns 'vmast_50_T_p_rgh.xy' and pos=1
+        #          gets 'vmast_50_T.xy' and returns 'vmast_50_T.xy' and pos=1
+        #          gets 'vmast_50_T.xy' and returns 'vmast_50_p_rgh_T.xy' and pos=2
+
+        # Get filename that has `param` and `var` in it
+        fname = os.path.basename(glob.glob(os.path.join(dpath,'*'+param+'_*'+var+'*'))[0])
+
+        # Get all the variables present in the current file. E.g.: ['T'], ['T','prgh']
+        varsInFname = fname.replace(self.fprefix,'').replace(param+'_','').replace(self.fsuffix,'') \
+                           .replace('p_rgh','prgh').split('_')
+
+        # Find position of the variable. First get rid of the _ in p_rgh
+        pos = varsInFname.index(var.replace('_',''))
+
+        # Length of the variable
+        if var in ['T','TMean','p_rgh']:
+            lenvar = 1
+        elif var in ['U','UMean']:
+            lenvar = 3
+        elif var in ['TPrimeUPrimeMean','UPrime2Mean']:
+            lenvar = 6
+        else:
+            raise NotImplementedError('Unknown variable name. Consider expanding the class.')
+
+        pos = pos*lenvar+1
 
-    def _read_data(self, dpath, fname):
-        fpath = dpath + os.sep + fname
+        return fname, pos, lenvar
+
+
+    def _read_data(self, dpath, fname, param, var):
+
+        fpath = dpath + os.sep + fname
+        pos = 0
+
+        # The sampling set groups scalars, vectors, and tensors in the same file. Due to this,
+        # files with names such as 'vmasts_h30_T_p_rgh.xy' will exist. If the desired variable
+        # is part of a file with multiple variables, then the `fname` filename passed to this
+        # function will not exist. Thus, we get the actual filename that contains the desired
+        # variable in its name and what position (column) that variable is in.
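+        # Hypothetical illustration: if var='T' is requested but the sampling
+        # set wrote 'vmasts_h30_T_p_rgh.xy', the constructed `fname` does not
+        # exist on disk; _getFileContainingVar then resolves the actual file
+        # and the starting column of the T block.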
+ if not os.path.isfile(fpath): + actualfname, pos, lenvar = self._getFileContainingVar(dpath, param, var) + fpath = dpath + os.sep + actualfname + currentTime = float(os.path.basename(dpath)) with open(fpath) as f: try: # read the actual data from probes array = self._read_probe_posAndData(f) + # get the correct location of the var if needed + if pos != 0: + array = np.c_[array[:,0:3], array[:,2+pos:2+pos+lenvar]] # add current time step info to first column array = np.c_[np.full(array.shape[0],currentTime), array] except IOError: @@ -279,6 +347,7 @@ def to_pandas(self,itime=None,fields=None,dtype=None): # create dataframes for each field print('Creating dataframe ...') + printzagl = self._printzagl data = {} for var in fields: print('processing', var) @@ -287,22 +356,30 @@ def to_pandas(self,itime=None,fields=None,dtype=None): data['time'] = F[:,0] data['x'] = F[:,1] data['y'] = F[:,2] - data['zabs'] = F[:,3] - data['zagl'] = F[:,4] - if F.shape[1]==6: + if printzagl: + data['zabs'] = F[:,3] + data['zagl'] = F[:,4] + else: + data['z'] = F[:,3] + if F.shape[1]==5+printzagl: # scalar - data[var] = F[:,5:].flatten() - elif F.shape[1]==8: + data[var] = F[:,4+printzagl:].flatten() + elif F.shape[1]==7+printzagl: # vector for j,name in enumerate(['x','y','z']): - data[var+name] = F[:,5+j].flatten() - elif F.shape[1]==11: + data[var+name] = F[:,4+printzagl+j].flatten() + elif F.shape[1]==10+printzagl: # symmetric tensor for j,name in enumerate(['xx','xy','xz','yy','yz','zz']): - data[var+name] = F[:,5+j].flatten() + data[var+name] = F[:,4+printzagl+j].flatten() df = pd.DataFrame(data=data,dtype=dtype) - return df.sort_values(['time','x','y','zabs','zagl']).set_index(['time','x','y','zagl']) + if printzagl: + df = df.sort_values(['time','x','y','zabs','zagl']).set_index(['time','x','y','zagl']) + else: + df = df.sort_values(['time','x','y','z']).set_index(['time','x','y','z']) + + return df def to_netcdf(self,fname,fieldDescriptions={},fieldUnits={}): raise NotImplementedError('Not available for ProbeSet class.') diff --git a/mmctools/windtools/windtools/SOWFA6/postProcessing/probes.py b/windtools/windtools/SOWFA6/postProcessing/probes.py similarity index 100% rename from mmctools/windtools/windtools/SOWFA6/postProcessing/probes.py rename to windtools/windtools/SOWFA6/postProcessing/probes.py diff --git a/mmctools/windtools/windtools/SOWFA6/postProcessing/reader.py b/windtools/windtools/SOWFA6/postProcessing/reader.py similarity index 100% rename from mmctools/windtools/windtools/SOWFA6/postProcessing/reader.py rename to windtools/windtools/SOWFA6/postProcessing/reader.py diff --git a/mmctools/windtools/windtools/SOWFA6/postProcessing/sourceHistory.py b/windtools/windtools/SOWFA6/postProcessing/sourceHistory.py similarity index 100% rename from mmctools/windtools/windtools/SOWFA6/postProcessing/sourceHistory.py rename to windtools/windtools/SOWFA6/postProcessing/sourceHistory.py diff --git a/mmctools/windtools/windtools/common.py b/windtools/windtools/common.py similarity index 100% rename from mmctools/windtools/windtools/common.py rename to windtools/windtools/common.py diff --git a/mmctools/windtools/windtools/inflow/general.py b/windtools/windtools/inflow/general.py similarity index 100% rename from mmctools/windtools/windtools/inflow/general.py rename to windtools/windtools/inflow/general.py diff --git a/mmctools/windtools/windtools/inflow/synthetic.py b/windtools/windtools/inflow/synthetic.py similarity index 100% rename from mmctools/windtools/windtools/inflow/synthetic.py rename to 
windtools/windtools/inflow/synthetic.py diff --git a/mmctools/windtools/windtools/io/binary.py b/windtools/windtools/io/binary.py similarity index 56% rename from mmctools/windtools/windtools/io/binary.py rename to windtools/windtools/io/binary.py index c001295..6d48880 100644 --- a/mmctools/windtools/windtools/io/binary.py +++ b/windtools/windtools/io/binary.py @@ -13,7 +13,7 @@ import struct -class BinaryFile: +class BinaryFile(object): """ Helper class for handling binary file I/O """ @@ -62,44 +62,64 @@ def unpack(self,*args): except struct.error: raise IOError - def read_char(self): - return self.f.read(1).decode('utf-8') + def read_char(self,encoding='utf-8'): + return self.f.read(1).decode(encoding) - def readline(self): + def readline(self,encoding='utf-8'): s = '' - b = self.read_char() - while not b == '\n': + b = self.read_char(encoding) + while not b in ['\n','']: s += b - b = self.f.read(1).decode('utf-8') + b = self.read_char(encoding) return s + b - # integers + # integer reads def read_int1(self,N=1): - if N==1: return self.unpack('b',self.f.read(1))[0] #short - else: return self.unpack('{:d}b',self.f.read(N*1))[0:N] #short + if N==1: + return self.unpack('b',self.f.read(1))[0] #short + else: + return self.unpack('{:d}b',self.f.read(N*1))[0:N] #short def read_int2(self,N=1): - if N==1: return self.unpack('h',self.f.read(2))[0] #short - else: return self.unpack('{:d}h'.format(N),self.f.read(N*2))[0:N] #short + if N==1: + return self.unpack('h',self.f.read(2))[0] #short + else: + return self.unpack('{:d}h'.format(N),self.f.read(N*2))[0:N] #short def read_int4(self,N=1): - if N==1: return self.unpack('i',self.f.read(4))[0] #int - else: return self.unpack('{:d}i'.format(N),self.f.read(N*4))[0:N] #int + if N==1: + return self.unpack('i',self.f.read(4))[0] #int + else: + return self.unpack('{:d}i'.format(N),self.f.read(N*4))[0:N] #int def read_int8(self,N=1): - if N==1: return self.unpack('l',self.f.read(8))[0] #long - else: return self.unpack('{:d}l'.format(N),self.f.read(N*8))[0:N] #long + if N==1: + return self.unpack('l',self.f.read(8))[0] #long + else: + return self.unpack('{:d}l'.format(N),self.f.read(N*8))[0:N] #long + def read_int(self,N=1): + return self.read_int4(N) - # floats + # float reads def read_float(self,N=1,dtype=float): - if N==1: return dtype( self.unpack('f',self.f.read(4))[0] ) - else: return [ dtype(val) for val in self.unpack('{:d}f'.format(N),self.f.read(N*4))[0:N] ] + if N==1: + return dtype( self.unpack('f',self.f.read(4))[0] ) + else: + return [ dtype(val) for val in self.unpack('{:d}f'.format(N),self.f.read(N*4))[0:N] ] def read_double(self,N=1): - if N==1: return self.unpack('d',self.f.read(8))[0] - else: return self.unpack('{:d}d'.format(N),self.f.read(N*8))[0:N] + if N==1: + return self.unpack('d',self.f.read(8))[0] + else: + return self.unpack('{:d}d'.format(N),self.f.read(N*8))[0:N] def read_real4(self,N=1): return self.read_float(N,dtype=np.float32) def read_real8(self,N=1): return self.read_float(N,dtype=np.float64) # binary output + def write(self,val,encoding='utf-8'): + if isinstance(val,str): + self.f.write(val.encode(encoding)) + else: + self.f.write(val) + def write_type(self,val,type): if hasattr(val,'__iter__'): N = len(val) @@ -107,16 +127,19 @@ def write_type(self,val,type): else: self.f.write(struct.pack(type,val)) - # aliases - def read_int(self,N=1): return self.read_int4(N) - - def write_int1(self,val): self.write_type(val,'b') - def write_int2(self,val): self.write_type(val,'h') - def write_int4(self,val): 
self.write_type(val,'i') - def write_int8(self,val): self.write_type(val,'l') - - def write_int(self,val): self.write_int4(val) - def write_float(self,val): self.write_type(val,'f') - def write_double(self,val): self.write_type(val,'d') + def write_int(self,val): + self.write_int4(val) + def write_int1(self,val): + self.write_type(val,'b') + def write_int2(self,val): + self.write_type(val,'h') + def write_int4(self,val): + self.write_type(val,'i') + def write_int8(self,val): + self.write_type(val,'l') + def write_float(self,val): + self.write_type(val,'f') + def write_double(self,val): + self.write_type(val,'d') diff --git a/windtools/windtools/io/ensight.py b/windtools/windtools/io/ensight.py new file mode 100644 index 0000000..b0b2183 --- /dev/null +++ b/windtools/windtools/io/ensight.py @@ -0,0 +1,113 @@ +# Copyright 2019 NREL + +# Licensed under the Apache License, Version 2.0 (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software distributed +# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +# CONDITIONS OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. + +import os +import pandas as pd + + +def read_mesh(fpath,headerlength=8,chunksize=None,read_connectivity=False,verbose=False): + """Read Ensight mesh file (ascii) into a dataframe + + Parameters + ---------- + headerlength: integer + Number of header lines to skip + chunksize: integer or None + Chunksize parameter for pd.read_csv, can speed up I/O + read_connectivity: bool + If True, read in connectivity information and convert points + from nodes to cell centers + """ + with open(fpath,'r') as f: + # header + for _ in range(headerlength): + line = f.readline() + if verbose: + print(line,end='') + N = int(f.readline()) + + # read points + if chunksize is None: + mesh = pd.read_csv(f,header=None,nrows=3*N).values + else: + mesh = pd.concat(pd.read_csv(f,header=None,nrows=3*N,chunksize=chunksize)).values + if verbose: + print(f'read {N} points') + mesh = pd.DataFrame(data=mesh.reshape((N,3),order='F'), columns=['x','y','z']) + + if read_connectivity: + # pd.read_csv may read data in chunks, so reading the next line is not + # guaranteed to give what we want... 
reread + with open(fpath,'r') as f: + for _ in range(headerlength + 1 + 3*N): + f.readline() + element_type = f.readline().strip() + if not element_type == 'quad4': + print(f'WARNING: element type "{element_type}" not tested') + Ncell = int(f.readline()) + if chunksize is None: + conn = pd.read_csv(f,header=None,nrows=Ncell, + delim_whitespace=True) + else: + conn = pd.concat(pd.read_csv(f,header=None,nrows=Ncell, + delim_whitespace=True, + chunksize=chunksize)) + if verbose: + print(f'read connectivity data for {Ncell} cells') + # switch to 0-indexing + assert (conn.values.max() == N) + conn -= 1 + assert (conn.values.min() == 0) + # calculate cell centers + newindices = pd.RangeIndex(Ncell) + nodes = [mesh.loc[indices].set_index(newindices) for col,indices in conn.iteritems()] + cellcenters = sum(nodes) / len(nodes) + mesh = cellcenters + + return mesh + + +def read_vector(fpath,mesh,t=None,sort=False,headerlength=4,chunksize=None): + """Read Ensight data array (ascii) into a dataframe with combined mesh + information corresponding to the specified time; mesh should be read in by + the read_mesh() function + + Parameters + ---------- + t: float + Simulation time to associate with this file [s], useful if + reading many files with the intention of using pd.concat + sort: bool + Sort by x,y,z + headerlength: integer + Number of header lines to skip + chunksize: integer or None + Chunksize parameter for pd.read_csv, can speed up I/O + """ + Npts = len(mesh) + with open(fpath,'r') as f: + for _ in range(headerlength): + f.readline() + if chunksize is None: + vals = pd.read_csv(f,header=None,nrows=3*Npts).values + else: + vals = pd.concat(pd.read_csv(f,header=None,nrows=3*Npts,chunksize=chunksize)).values + df = mesh.copy() + uvw = pd.DataFrame(data=vals.reshape((Npts,3),order='F'), columns=['u','v','w']) + df = pd.concat([df,uvw], axis=1) + if t is not None: + df['t'] = t + df = df.set_index(['t','x','y','z']) + else: + df = df.set_index(['x','y','z']) + if sort: + df = df.sort_index() + return df diff --git a/mmctools/windtools/windtools/io/series.py b/windtools/windtools/io/series.py similarity index 100% rename from mmctools/windtools/windtools/io/series.py rename to windtools/windtools/io/series.py diff --git a/windtools/windtools/io/vtk.py b/windtools/windtools/io/vtk.py new file mode 100644 index 0000000..4dfc508 --- /dev/null +++ b/windtools/windtools/io/vtk.py @@ -0,0 +1,538 @@ +# Copyright 2021 NREL + +# Licensed under the Apache License, Version 2.0 (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy of the +# License at http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software distributed +# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +# CONDITIONS OF ANY KIND, either express or implied. See the License for the +# specific language governing permissions and limitations under the License. + +""" +Functions for handling VTKs +""" + +import struct +import vtk +from vtk.numpy_interface import dataset_adapter as dsa +from scipy.interpolate import griddata +import xarray as xr +import os +import datetime +import pandas as pd +import numpy as np + +def readVTK(vtkpath, sliceType=None, dateref=None, ti=None, tf=None, t=None, res=None, squash=None): + """ + Read VTK file(s) and interpolate into a uniform grid based on the limits read from the VTK + and resolution given. 
+
+    Function tailored to read multiple files with the following file structure:
+        full/path/to/
+        ├── <time1>
+        ├── <time2>
+        │   ├── <sliceType1>.vtk
+        │   ├── <sliceType2>.vtk
+        │   ├── ...
+        │   └── <sliceTypeN>.vtk
+        ├── ...
+        └── <timeN>
+
+    To read multiple VTKs, specify sliceType, ti, and tf;
+    To read a single VTK, there are two options: (i) specify the full vtk path vtkpath, or
+    (ii) specify vtkpath, sliceType, and t.
+
+    If reading more than a single VTK, it is assumed that they share the same coordinates.
+
+    The function provides output for a full 3-D VTK. If reading planar data, use the squash
+    option.
+
+    Example calls:
+        # Read multiple
+        ds = readVTK('full/path/to/vtks', sliceType='U_zNormal.80.vtk', ti=133200, tf=133205,
+                     dateref=pd.to_datetime('2010-05-14 12:00:00'), res=10)
+        # Read single
+        ds = readVTK('full/path/to/vtks/133200/U_zNormal.80.vtk',
+                     dateref=pd.to_datetime('2010-05-14 12:00:00'), res=10)
+        # Read single
+        ds = readVTK('full/path/to/vtks', sliceType='U_zNormal.80.vtk', t=133200,
+                     dateref=pd.to_datetime('2010-05-14 12:00:00'), res=10)
+
+    Parameters:
+    -----------
+    vtkpath: str
+        Full path to the directory of time steps containing the VTK files (if reading
+        multiple); or full path to the vtk itself, with extension (if reading single)
+    sliceType: str
+        Common name of the collection of slices to be read. E.g. 'U_zNormal.80.vtk'
+    dateref: datetime
+        Reference time to be specified if datetime output is desired. Default is
+        float/integer output, representing seconds
+    ti, tf, t: int, float, str
+        Time directories that should be read. If a single time t is specified, then
+        the nearest matching time directory is read
+    res: int, float, or three-component tuple
+        Resolution of the meshgrid onto which the data will be interpolated.
+        If tuple, provide resolution in x, y, and z, in order
+    squash: str; 'x', 'y', or 'z' only
+        Squash the VTK into a plane (useful for terrain slices)
+
+    Returns:
+    --------
+    ds: xr.Dataset
+        Dataset containing the data with x, y [, z, time] as coordinates
+
+
+    written by Regis Thedin (regis.thedin@nrel.gov)
+    """
+
+
+    # Single slice was requested, using `vtkpath='some/path/slice.vtk'`
+    if os.path.isfile(vtkpath):
+        if isinstance(vtkpath, str):
+            if vtkpath.endswith('.vtk'):
+                print('Reading a single VTK. For output with `datetime` as coordinate, specify the path, sliceType, and t (see docstrings for example)')
+                x, y, z, out = readSingleVTK(vtkpath, res=res, squash=squash)
+                ds = VTK2xarray(x, y, z, out)
+                return ds
+            else:
+                raise SyntaxError("Single vtk specification using vtkpath='/path/to/file.vtk' should include the extension")
+        else:
+            raise SyntaxError("The vtkpath='path/to/vtks' should be a string.")
+
+
+    # Some checks
+    assert os.path.isdir(vtkpath), f'Directory of VTKs given {vtkpath} is not a directory. For single VTK, see docstrings for example.'
+    if not sliceType.endswith('.vtk'):
+        raise SyntaxError('sliceType should be given with .vtk extension')
+    if ti!=None and tf!=None:
+        if t!=None: raise ValueError('You can specify either t only, or ti and tf, but not all three')
+        if ti>tf:   raise ValueError('tf should be larger than ti')
+        if ti==tf:
+            t = ti
+    elif tf!= None:
+        raise ValueError('If you specify tf, then ti should be specified too')
+    elif ti!= None:
+        raise ValueError('If you specify ti, then tf should be specified too')
+    else:
+        if t==None:
+            raise ValueError("You have to specify at least one time. 
If a single VTK is needed, use " \
+                             "vtkpath='path/to/file.vtk' ")
+
+    # Get the time directories
+    times_str = sorted(os.listdir(vtkpath))
+    times_float = [float(i) for i in times_str]
+
+    if t is not None:
+        # Single time was requested
+        pos_t = min(range(len(times_float)), key=lambda i: abs(times_float[i]-t))
+        if pos_t == 0 and t < 0.999*times_float[0]:
+            raise ValueError(f'No VTK found for time {t}. The first available VTK is at t={times_float[0]}')
+        if pos_t == len(times_str)-1 and t > 1.001*times_float[-1]:
+            raise ValueError(f'No VTK found for time {t}. The last available VTK is at t={times_float[-1]}')
+        t = times_str[pos_t]
+        print(f'Reading a single VTK for time {float(t)}')
+        x, y, z, out = readSingleVTK(os.path.join(vtkpath,t,sliceType), res=res, squash=squash)
+        ds = VTK2xarray(x, y, z, out, t, dateref)
+        return ds
+
+    # Find limits of the requested subset
+    pos_ti = min(range(len(times_float)), key=lambda i: abs(times_float[i]-ti))
+    pos_tf = min(range(len(times_float)), key=lambda i: abs(times_float[i]-tf))
+    nvtk = pos_tf - pos_ti
+
+    if nvtk == 0:
+        raise ValueError('No VTKs found for the time range specified. VTKs are available ' \
+                         f'between {times_float[0]} and {times_float[-1]}.')
+
+    print(f'Number of VTKs to be read: {nvtk}')
+
+    dslist = []
+    for i, t in enumerate(times_str[pos_ti:pos_tf]):
+        print(f'Iteration {i}: processing time {t}... {100*i/nvtk:.2f}%', end='\r', flush=True)
+        x, y, z, out = readSingleVTK(os.path.join(vtkpath,t,sliceType), res=res, squash=squash)
+        current_ds = VTK2xarray(x, y, z, out, t, dateref)
+        dslist.append(current_ds)
+
+    print('\nDone.')
+
+    # Concatenate on the appropriate dimension
+    if 'time' in dslist[0].dims:
+        ds = xr.concat(dslist, dim='time')
+    elif 'datetime' in dslist[0].dims:
+        ds = xr.concat(dslist, dim='datetime')
+
+    return ds
+
+
+
+def VTK2xarray(x, y, z, out, t=None, dateref=None):
+    """
+    Convert x, y, z, and out from `readSingleVTK` into a dataset.
+    If dateref is provided, then datetime is given in the output.
+
+    If the data lies on a plane, the third direction is omitted and results
+    are given in terms of x and y (even if the data is on the yz plane).
+    If full 3-D data (no squash), then x, y, and z are given.
+
+    Parameters:
+    -----------
+    x, y, z: array
+        Coordinates of the grid
+    out: nD-array
+        Values at the grid points. Size depends on the number of components
+    t: int, float, str (optional)
+        time to add to the Dataset as a coordinate
+    dateref: datetime (optional)
+        Reference time to be specified if datetime output is desired. Requires t
+        to be specified as well
+
+    Returns:
+    --------
+    ds: xr.Dataset
+        Dataset containing the data with x, y [, z, time] as coordinates
+
+    written by Regis Thedin (regis.thedin@nrel.gov)
+    """
+
+    if dateref is not None and t is None:
+        raise SyntaxError('If dateref is specified, t should be specified too')
+
+    if len(out) == 3:
+        # vector
+        ds = xr.Dataset({'u':(('x','y','z'), out[0,:]), 'v':(('x','y','z'), out[1,:]), 'w':(('x','y','z'), out[2,:])})
+    elif len(out) == 1:
+        # scalar
+        ds = xr.Dataset({'var':(('x','y','z'), out[0,:])})
+    elif len(out) == 6:
+        # tensor
+        ds = xr.Dataset({'var_xx':(('x','y','z'), out[0,:]), 'var_xy':(('x','y','z'), out[1,:]), 'var_xz':(('x','y','z'), out[2,:]), \
+                         'var_yy':(('x','y','z'), out[3,:]), 'var_yz':(('x','y','z'), out[4,:]), 'var_zz':(('x','y','z'), out[5,:])})
+    else:
+        raise ValueError(f'Read variable with {len(out)} components. Not sure how to follow. 
Stopping.')
+
+    ds = ds.assign_coords({'x':x, 'y':y, 'z':z})
+
+    # If data is on a plane (squash != None), get rid of the third dimension
+    if len(ds.z) == 1:
+        ds = ds.squeeze(dim='z').drop_vars('z')
+
+    if isinstance(t, (str, int, float)):
+        if isinstance(dateref, (datetime.datetime, pd.Timestamp)):
+            t = pd.to_datetime(float(t), unit='s', origin=dateref).round('0.1S')
+            timename = 'datetime'
+        else:
+            t = float(t)
+            timename = 'time'
+        ds[timename] = t
+        ds = ds.expand_dims(timename).assign_coords({timename:[t]})
+
+    return ds
+
+
+
+def readSingleVTK(vtkfullpath, res=None, squash=None):
+    """
+    Read a single VTK file and interpolate onto a uniform grid based on the limits read from the VTK
+
+    If requesting a full 3-D field, it may be appropriate to give a tuple (resx, resy, resz) as input in `res`
+
+    For a full 3-D field, the function assumes the points are given within a structured grid of
+    either uniform resolution `res` or resolution (res[0],res[1],res[2]) in all directions. It uses
+    `griddata` for interpolation and thus a very large field will take significant time.
+
+    Parameters:
+    -----------
+    vtkfullpath: str
+        full path and filename of the desired VTK file
+    res: int, float, or three-component tuple
+        resolution of the interpolated grid. If tuple, resolution in x, y, and z, in order
+    squash: str, 'x', 'y', or 'z' only
+        Squash the VTK into a plane (useful for terrain slices)
+
+    Returns:
+    --------
+    x, y, z:
+        1-D arrays of x, y, and z grid positions
+    out:
+        array of shape (<ncomp>, <nx>, <ny>, <nz>), where x and y are the long and short
+        direction, respectively. E.g. for Uz, out[2]
+
+    written by Regis Thedin (regis.thedin@nrel.gov)
+    """
+
+
+    # Check if the slice actually exists
+    if not os.path.isfile(vtkfullpath):
+        raise FileNotFoundError(f'Slice {os.path.basename(os.path.dirname(vtkfullpath))}/' \
+                                f'{os.path.basename(vtkfullpath)} does not exist.')
+
+    # Open VTK
+    reader = vtk.vtkPolyDataReader()
+    reader.SetFileName(vtkfullpath)
+    reader.ReadAllVectorsOn()
+    reader.Update()
+
+    # Load data
+    polydata = dsa.WrapDataObject(reader.GetOutput())
+    ptdata = polydata.GetPointData()
+    coords = np.round(polydata.Points, 4)
+    data = ptdata.GetArray(0)
+
+    # Determine what plane the VTK is in
+    if squash == None:
+        # Full 3-D field
+        dir1=0; dir2=1
+        dirconst=2
+        print('Reading full 3D VTK. It might take a while. If you are reading planes of data, use `squash`.')
+    elif squash=='x':
+        dirconst = 0
+        dir1=1; dir2=2
+    elif squash=='y':
+        dirconst = 1
+        dir1=0; dir2=2
+    elif squash=='z':
+        dirconst = 2
+        dir1=0; dir2=1
+    else:
+        raise ValueError("Squash is only available for 'x', 'y', or 'z'")
+
+
+    # OpenFOAM has a bug when saving VTK slices using the `type cuttingPlane` option in the dictionary.
+    # The bug results in a VTK where the coordinate points are no longer contained in a single plane.
+    # For the OpenFOAM-buggy VTK slice, a pattern does seem to exist. It is more clearly explained with
+    # an example: Consider a slice at y=4000 where the domain extents are from 0 to 6000 in y. The points
+    # are saved at 3 distinct planes: y=4000, y=6000, and another just under 6000, say 5998. The extra
+    # points that are not where they should be are at/near the domain boundary _closest_ to the slice
+    # plane location. If the slice was, for example, at y=1000, then the boundary at y=0 would have the
+    # extra points. The only pattern that was identifiable is that 3 unique planes are created and the
+    # wrong ones are side by side. The idea followed here is that we identify the "isolated" plane and
+    # use it to give the coordinate of the actual sampled location. We recover the correct plane with
+    # the commands below.
+    # None of this is crucial to this function as is, since it is not returning any information about
+    # the plane the VTK is at, but rather only using that information for interpolation purposes. This
+    # is left here as a note for future extension of the function.
+    if squash in ['x','y','z']:
+        # For convenience, we use x and y to refer to the dimensions along which the slice varies
+        [xminbox, yminbox] = np.min(coords, 0)[[dir1, dir2]]
+        [xmaxbox, ymaxbox] = np.max(coords, 0)[[dir1, dir2]]
+
+        uniqueCoords = np.unique(coords[:,dirconst])
+        if np.argmax(np.diff(uniqueCoords, prepend=uniqueCoords[0])) == 1:
+            zlevel = uniqueCoords[0]
+        else:
+            zlevel = uniqueCoords[-1]
+        zminbox = zmaxbox = zlevel
+
+        # Squash the data into the requested plane
+        coords[:,dirconst] = zlevel
+
+    else:
+        [xminbox, yminbox, zminbox] = np.min(coords, 0)[[dir1, dir2, dirconst]]
+        [xmaxbox, ymaxbox, zmaxbox] = np.max(coords, 0)[[dir1, dir2, dirconst]]
+
+
+    # Get the proper uniform or non-uniform resolution
+    if isinstance(res, (int, float)):
+        dx = dy = dz = res
+    elif isinstance(res, tuple):
+        try: dx=res[0]; dy=res[1]; dz=res[2]
+        except IndexError: print('Tuple must have 3 components'); raise
+    else:
+        raise ValueError('Resolution should be given as either a scalar (uniform '\
+                         'resolution) or a 3-component tuple (resx, resy, resz).' )
+
+
+    # Create the desired output grid
+    x1d = np.arange(xminbox,xmaxbox+dx,dx)
+    y1d = np.arange(yminbox,ymaxbox+dy,dy)
+    if squash in ['x','y','z']:
+        z1d = np.asarray([zlevel])
+    else:
+        z1d = np.arange(zminbox, zmaxbox+dz, dz)
+    [x3d,y3d,z3d] = np.meshgrid(x1d, y1d, z1d, indexing='ij')
+
+    n = np.ravel(x3d).shape[0]
+    coords_want = np.zeros(shape=(n,3))
+    coords_want[:,dir1] = np.ravel(x3d)
+    coords_want[:,dir2] = np.ravel(y3d)
+    coords_want[:,dirconst] = np.ravel(z3d)
+
+    # Interpolate values to given resolution
+    tmp = griddata(coords, data, coords_want, method='nearest')
+
+    if tmp.size/n == 3:
+        # vector
+        out = np.reshape(np.vstack((tmp[:,0],tmp[:,1],tmp[:,2])), (3,x1d.size,y1d.size,z1d.size))
+    elif tmp.size/n == 1:
+        # scalar
+        out = np.reshape(tmp,(1,x1d.size,y1d.size,z1d.size))
+    elif tmp.size/n == 6:
+        # tensor
+        out = np.reshape(np.vstack((tmp[:,0],tmp[:,1],tmp[:,2],tmp[:,3],tmp[:,4],tmp[:,5])), (6,x1d.size,y1d.size,z1d.size))
+    else:
+        # something else
+        raise NotImplementedError(f'Found variable with {tmp.size//n} components. Not sure how to follow. Stopping.')
+
+    return x1d, y1d, z1d, out
+
+
+
+def vtk_write_structured_points( f,
+                                 datadict,
+                                 ds=None,dx=None,dy=None,dz=None,
+                                 origin=(0.0,0.0,0.0),
+                                 indexorder='ijk',
+                                 vtk_header='# vtk DataFile Version 2.0',
+                                 vtk_datatype='float',
+                                 vtk_description='really cool data'
+                               ):
+    """Write a VTK dataset with regular topology to file handle 'f'
+    written by Eliot Quon (eliot.quon@nrel.gov)
+
+    Inputs are written with x increasing fastest, then y, then z.
+
+    Example: Writing out a vector field to one VTK file.
+    ```
+    from windtools.io.vtk import vtk_write_structured_points
+    with open('some_data.vtk','wb') as f:
+        vtk_write_structured_points(f,
+                                    {'vel':np.stack((u,v,w))},
+                                    ds=1.0,
+                                    indexorder='ijk')
+    ```
+
+    Parameters
+    ----------
+    datadict : dict
+        Dictionary with keys as the data names. Data are either scalar
+        fields with shape (nx,ny,nz) or vector fields with shape
+        (3,nx,ny,nz). 
+ ds : float, optional + Default grid spacing; dx,dy,dz may be specified to override + dx, dy, dz : float, optional + Specific grid spacings; if ds is not specified, then all three + must be specified + origin : list-like, optional + Origin of the grid + indexorder: str + Specify the indexing convention (standard: 'ijk', TTUDD: 'jik') + + @author: ewquon + """ + # calculate grid spacings if needed + if ds: + if not dx: dx = ds + if not dy: dy = ds + if not dz: dz = ds + else: + assert( dx > 0 and dy > 0 and dz > 0 ) + + # check data + nx = ny = nz = None + datatype = {} + for name,data in datadict.items(): + dims = data.shape + if len(dims) == 3: + datatype[name] = 'scalar' + elif len(dims) == 4: + assert dims[0] == 3 + datatype[name] = 'vector' + else: + raise ValueError('Unexpected "'+name+'" array shape: '+str(data.shape)) + if nx is None: + nx = dims[-3] + ny = dims[-2] + nz = dims[-1] + else: + assert (nx==dims[-3]) and (ny==dims[-2]) and (nz==dims[-1]) + + # write header + if 'b' in f.mode: + binary = True + import struct + if bytes is str: + # python 2 + def b(s): + return str(s) + else: + # python 3 + def b(s): + return bytes(s,'utf-8') + f.write(b(vtk_header+'\n')) + f.write(b(vtk_description+'\n')) + f.write(b('BINARY\n')) + f.write(b('DATASET STRUCTURED_POINTS\n')) + + # write out mesh descriptors + f.write(b('DIMENSIONS {:d} {:d} {:d}\n'.format(nx,ny,nz))) + f.write(b('ORIGIN {:f} {:f} {:f}\n'.format(origin[0],origin[1],origin[2]))) + f.write(b('SPACING {:f} {:f} {:f}\n'.format(dx,dy,dz))) + + # write out data + f.write(b('POINT_DATA {:d}\n'.format(nx*ny*nz))) + + else: + binary = False + f.write(vtk_header+'\n') + f.write(vtk_description+'\n') + f.write('ASCII\n') + f.write('DATASET STRUCTURED_POINTS\n') + + # write out mesh descriptors + f.write('DIMENSIONS {:d} {:d} {:d}\n'.format(nx,ny,nz)) + f.write('ORIGIN {:f} {:f} {:f}\n'.format(origin[0],origin[1],origin[2])) + f.write('SPACING {:f} {:f} {:f}\n'.format(dx,dy,dz)) + + # write out data + f.write('POINT_DATA {:d}\n'.format(nx*ny*nz)) + + for name,data in datadict.items(): + outputtype = datatype[name] + if outputtype=='vector': + u = data[0,:,:,:] + v = data[1,:,:,:] + w = data[2,:,:,:] + elif outputtype=='scalar': + u = data + else: + raise ValueError('Unexpected data type '+outputtype) + + name = name.replace(' ','_') + + mapping = { 'i': range(nx), 'j': range(ny), 'k': range(nz) } + ijkranges = [ mapping[ijk] for ijk in indexorder ] + + if outputtype=='vector': + if binary: + f.write(b('{:s}S {:s} {:s}\n'.format(outputtype.upper(),name,vtk_datatype))) + for k in ijkranges[2]: + for j in ijkranges[1]: + for i in ijkranges[0]: + f.write(struct.pack('>fff', u[i,j,k], v[i,j,k], w[i,j,k])) # big endian + else: #ascii + f.write('{:s}S {:s} {:s}\n'.format(outputtype.upper(),name,vtk_datatype)) + for k in ijkranges[2]: + for j in ijkranges[1]: + for i in ijkranges[0]: + f.write(' {:f} {:f} {:f}\n'.format(u[i,j,k], v[i,j,k], w[i,j,k])) + elif outputtype=='scalar': + if binary: + f.write(b('{:s}S {:s} {:s}\n'.format(outputtype.upper(),name,vtk_datatype))) + f.write(b('LOOKUP_TABLE default\n')) + for k in ijkranges[2]: + for j in ijkranges[1]: + for i in ijkranges[0]: + #f.write(struct.pack('f',u[j,i,k])) # native endianness + f.write(struct.pack('>f',u[i,j,k])) # big endian + else: + f.write('{:s}S {:s} {:s}\n'.format(outputtype.upper(),name,vtk_datatype)) + f.write('LOOKUP_TABLE default\n') + for k in ijkranges[2]: + for j in ijkranges[1]: + for i in ijkranges[0]: + f.write(' {:f}\n'.format(u[i,j,k])) + diff --git 
a/mmctools/windtools/windtools/openfast.py b/windtools/windtools/openfast.py
similarity index 100%
rename from mmctools/windtools/windtools/openfast.py
rename to windtools/windtools/openfast.py
diff --git a/mmctools/windtools/windtools/openfoam.py b/windtools/windtools/openfoam.py
similarity index 90%
rename from mmctools/windtools/windtools/openfoam.py
rename to windtools/windtools/openfoam.py
index d32899a..75b5ebf 100644
--- a/mmctools/windtools/windtools/openfoam.py
+++ b/windtools/windtools/openfoam.py
@@ -9,6 +9,7 @@
 # CONDITIONS OF ANY KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations under the License.
 
+import os
 import re
 
@@ -46,7 +47,7 @@ class InputFile(dict):
         'table',
     ]
 
-    def __init__(self,fpath,nodef=False):
+    def __init__(self,fpath,nodef=False,include=False):
         """Create a dictionary of definitions from an OpenFOAM-style
         input file.
 
@@ -59,7 +60,12 @@ def __init__(self,fpath,nodef=False):
             vector values to be included from another OpenFOAM file, then
             create a generic 'data' parent object to contain the file
             data.
+        include : bool, optional
+            Read, in place, any additional input files specified by
+            '#include' directives
         """
+        self.fpath = fpath
+        self.nodef = nodef
+        self.include = include
         # read full file
         with open(fpath) as f:
             lines = f.readlines()
@@ -68,7 +74,9 @@
         # trim single-line comments and remove directives
         for i,line in enumerate(lines):
             line = line.strip()
-            if line.startswith('#'):
+            if include and line.startswith('#include'):
+                lines[i] = line
+            elif line.startswith('#'):
                 if self.DEBUG:
                     print('Ignoring directive:',line)
                 lines[i] = ''
@@ -159,7 +167,11 @@ def _split_defs(self,txt):
                 assert (idx > 0), 'problem parsing '+string+' field'
                 string = txt[:idx].strip()
 
-            if string.endswith(';'):
+            if name == '#include':
+                names.append(name)
+                lines.append(string)
+                container.append(None)
+            elif string.endswith(';'):
                 # found single definition
                 if self.DEBUG:
                     print('value=',string[:-1])
                 names.append(name)
@@ -219,7 +231,20 @@ def _parse(self,name,defn,containertype,parent=None):
             if containertype is not None:
                 print('container type:',containertype)
         defn = defn.strip()
-        if containertype is None:
+        if name == '#include':
+            includedfile = os.path.join(os.path.split(self.fpath)[0],
+                                        defn.strip('"'))
+            if self.DEBUG:
+                print('PARSING included file',includedfile)
+            tmp = InputFile(includedfile)
+            for key,val in tmp.items():
+                if parent is None:
+                    # parent is the InputFile object
+                    self.__setitem__(key,val)
+                else:
+                    assert isinstance(parent, dict)
+                    parent[key] = val
+        elif containertype is None:
            # set single value in parent
            defn = self._try_cast(defn)
            # SET VALUE HERE
diff --git a/mmctools/windtools/windtools/plotting.py b/windtools/windtools/plotting.py
similarity index 100%
rename from mmctools/windtools/windtools/plotting.py
rename to windtools/windtools/plotting.py
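A minimal usage sketch of the new `include` option added to `windtools/windtools/openfoam.py` above; the case file path is hypothetical:

```python
from windtools.openfoam import InputFile

# InputFile subclasses dict; with include=True, any file referenced by an
# '#include' directive is parsed in place and its entries are merged
# alongside the locally defined ones (without it, '#include' lines are
# dropped as directives).
inputs = InputFile('system/controlDict', include=True)  # hypothetical path

# Entries pulled in from the included file are accessed like any other key
print(inputs['application'])
```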