diff --git a/modules/performFEM/OpenSeesPy/createOpenSeesPyDriver.cpp b/modules/performFEM/OpenSeesPy/createOpenSeesPyDriver.cpp
index e918ddbcb..c81a3f641 100644
--- a/modules/performFEM/OpenSeesPy/createOpenSeesPyDriver.cpp
+++ b/modules/performFEM/OpenSeesPy/createOpenSeesPyDriver.cpp
@@ -310,7 +310,7 @@ int createDriver(int argc, const char **argv) {
     std::ifstream modelFile(mainScript);
     std::string line;
     while (std::getline(modelFile, line)) {
-        std::cout << line << std::endl; // Print line to console
+        // std::cout << line << std::endl; // Print line to console
         templateFile << line << std::endl; // Write line to template file
     }
     templateFile.close();
diff --git a/modules/performUQ/UCSD_UQ/UCSD_UQ.py b/modules/performUQ/UCSD_UQ/UCSD_UQ.py
index 0b7984be2..42260a386 100644
--- a/modules/performUQ/UCSD_UQ/UCSD_UQ.py
+++ b/modules/performUQ/UCSD_UQ/UCSD_UQ.py
@@ -60,10 +60,13 @@ def main(args):  # noqa: D103
             text=True,
             check=False,
         )
-        result.check_returncode()
+        result.check_returncode()  # Raises CalledProcessError if return code is non-zero
     except subprocess.CalledProcessError:
         with open(err_file, 'a') as f:  # noqa: PTH123
             f.write(f'ERROR: {result.stderr}')
+    else:
+        # Print success if no error occurs
+        print('SUCCESS')  # noqa: T201
 
 
 if __name__ == '__main__':
diff --git a/modules/performUQ/UCSD_UQ/mainscript.py b/modules/performUQ/UCSD_UQ/mainscript.py
index 1a93bf504..4d55e3551 100644
--- a/modules/performUQ/UCSD_UQ/mainscript.py
+++ b/modules/performUQ/UCSD_UQ/mainscript.py
@@ -7,6 +7,7 @@
 import json
 import shlex
 import sys
+import traceback
 from pathlib import Path
 
 path_to_common_uq = Path(__file__).parent.parent / 'common'
@@ -64,10 +65,11 @@ def main(input_args):  # noqa: D103
     # Try running the main_function and catch any exceptions
     try:
         main_function(command_list)
-    except Exception as e:  # noqa: BLE001
+    except Exception:  # noqa: BLE001
         # Write the exception message to the .err file
         with err_file.open('a') as f:
-            f.write(f'ERROR: {e!s}\n')
+            f.write('ERROR: An exception occurred:\n')
+            f.write(f'{traceback.format_exc()}\n')
 
 
 # ======================================================================================================================
diff --git a/modules/performUQ/UCSD_UQ/mainscript_hierarchical_bayesian.py b/modules/performUQ/UCSD_UQ/mainscript_hierarchical_bayesian.py
index 48ec64caf..16fd08ccf 100644
--- a/modules/performUQ/UCSD_UQ/mainscript_hierarchical_bayesian.py
+++ b/modules/performUQ/UCSD_UQ/mainscript_hierarchical_bayesian.py
@@ -68,7 +68,7 @@ def loglikelihood_function(residual, error_variance_sample):  # noqa: D103
     return ll
 
 
-def main(input_args):  # noqa: D103
+def main(input_args):  # noqa: C901, D103
     # Initialize analysis
     working_directory = Path(input_args[0]).resolve()
     template_directory = Path(input_args[1]).resolve()  # noqa: F841
@@ -78,24 +78,38 @@ def main(input_args):  # noqa: D103
 
     # input_file_full_path = template_directory / input_file
 
-    with open(input_file, encoding='utf-8') as f:  # noqa: PTH123
-        inputs = json.load(f)
-
-    uq_inputs = inputs['UQ']
-    rv_inputs = inputs['randomVariables']
-    edp_inputs = inputs['EDP']
-
-    (
-        parallel_pool,
-        function_to_evaluate,
-        joint_distribution,
-        num_rv,
-        num_edp,
-        list_of_model_evaluation_functions,
-        list_of_datasets,
-        list_of_dataset_lengths,
-        restart_file,
-    ) = preprocess_hierarchical_bayesian.preprocess_arguments(input_args)
+    try:
+        with open(input_file, encoding='utf-8') as f:  # noqa: PTH123
+            inputs = json.load(f)
+
+        uq_inputs = inputs['UQ']
+        rv_inputs = inputs['randomVariables']
+        edp_inputs = inputs['EDP']
+    except FileNotFoundError as fnf_error:
+        msg = f"Input file '{input_file}' not found. Please check the file path."
+        raise FileNotFoundError(msg) from fnf_error
+    except json.JSONDecodeError as json_error:
+        msg = f"Error decoding JSON from file '{input_file}'. Ensure the file contains valid JSON."
+        raise ValueError(msg) from json_error
+    except KeyError as key_error:
+        msg = f'Missing required key in JSON data: {key_error}. Please check the input file format.'
+        raise KeyError(msg) from key_error
+
+    try:
+        (
+            parallel_pool,
+            function_to_evaluate,
+            joint_distribution,
+            num_rv,
+            num_edp,
+            list_of_model_evaluation_functions,
+            list_of_datasets,
+            list_of_dataset_lengths,
+            restart_file,
+        ) = preprocess_hierarchical_bayesian.preprocess_arguments(input_args)
+    except Exception as e:
+        msg = "Error during the preprocessing of arguments in 'preprocess_hierarchical_bayesian'."
+        raise RuntimeError(msg) from e
 
     transformation_function = joint_distribution.u_to_x
     prior_inverse_gamma_parameters = uq_utilities.InverseGammaParameters(
diff --git a/modules/performUQ/UCSD_UQ/runTMCMC.py b/modules/performUQ/UCSD_UQ/runTMCMC.py
index 14e571423..7911a2b71 100644
--- a/modules/performUQ/UCSD_UQ/runTMCMC.py
+++ b/modules/performUQ/UCSD_UQ/runTMCMC.py
@@ -183,7 +183,7 @@ def write_data_to_csvfile(  # noqa: D103
     # Finished writing data
 
 
-def run_TMCMC(  # noqa: N802, PLR0913
+def run_TMCMC(  # noqa: C901, N802, PLR0913
     number_of_samples,
     number_of_chains,
     all_distributions_list,
@@ -264,11 +264,11 @@ def run_TMCMC(  # noqa: N802, PLR0913
 
         # Evaluate log-likelihood at current samples Sm
        if run_type == 'runningLocal':
-            processor_count = mp.cpu_count()
-            if processor_count > 32:
+            max_num_processes = 32  # max number of processes to use for multiprocessing when running locally
+            if processor_count > max_num_processes:
                 processor_count = 8
-
+
             pool = Pool(processes=processor_count)
             write_eval_data_to_logfile(
                 logfile,
@@ -276,13 +276,13 @@ def run_TMCMC(  # noqa: N802, PLR0913
                 run_type,
                 proc_count=processor_count,
                 stage_num=stage_number,
-            )
+            )
             outputs = pool.starmap(runFEM, iterables)
-
+
             # pool does not start
-            #mp.set_start_method('forkserver', force=True)
-            #processor_count = mp.cpu_count()
-            #with mp.Pool(processes=processor_count) as pool:
+            # mp.set_start_method('forkserver', force=True)
+            # processor_count = mp.cpu_count()
+            # with mp.Pool(processes=processor_count) as pool:
             #     write_eval_data_to_logfile(
             #         logfile,
             #         parallelize_MCMC,
@@ -292,8 +292,8 @@ def run_TMCMC(  # noqa: N802, PLR0913
             #     )
             #     outputs = pool.starmap(runFEM, iterables)
 
-            #mp.set_start_method('spawn')
-            #with mp.Pool(processes=processor_count) as pool:
+            # mp.set_start_method('spawn')
+            # with mp.Pool(processes=processor_count) as pool:
             #     write_eval_data_to_logfile(
             #         logfile,
             #         parallelize_MCMC,
@@ -302,7 +302,7 @@ def run_TMCMC(  # noqa: N802, PLR0913
             #         stage_num=stage_number,
             #     )
             #     outputs = pool.starmap(runFEM, iterables)
-
+
             log_likelihoods_list = []
             predictions_list = []
             for output in outputs:
diff --git a/modules/performUQ/common/CMakeLists.txt b/modules/performUQ/common/CMakeLists.txt
index a70a5fa06..e8c02bcac 100644
--- a/modules/performUQ/common/CMakeLists.txt
+++ b/modules/performUQ/common/CMakeLists.txt
@@ -14,4 +14,13 @@ target_include_directories(commonUQ PUBLIC ${CONAN_INCLUDE_DIRS_JANSSON})
 simcenter_add_python_script(SCRIPT uq_utilities.py)
 simcenter_add_python_script(SCRIPT quoFEM_RV_models.py)
 simcenter_add_python_script(SCRIPT parallel_runner_mpi4py.py)
-add_subdirectory(ERAClasses)
\ No newline at end of file
+add_subdirectory(ERAClasses)
+
+simcenter_add_python_script(SCRIPT adaptive_doe.py)
+simcenter_add_python_script(SCRIPT common_datamodels.py)
+simcenter_add_python_script(SCRIPT convergence_metrics.py)
+simcenter_add_python_script(SCRIPT gp_ab_algorithm.py)
+simcenter_add_python_script(SCRIPT gp_model.py)
+simcenter_add_python_script(SCRIPT principal_component_analysis.py)
+simcenter_add_python_script(SCRIPT space_filling_doe.py)
+simcenter_add_python_script(SCRIPT tmcmc.py)
\ No newline at end of file
diff --git a/modules/performUQ/common/tmcmc.py b/modules/performUQ/common/tmcmc.py
index a375db6e5..6dd06dca9 100644
--- a/modules/performUQ/common/tmcmc.py
+++ b/modules/performUQ/common/tmcmc.py
@@ -3,6 +3,7 @@
 import math
 
 import numpy as np
+from scipy.optimize import root_scalar
 from scipy.special import logsumexp
 
 
@@ -22,8 +23,7 @@ def _calculate_weights_warm_start(
     np.ndarray: The calculated weights.
     """
     log_weights = beta * (current_loglikelihoods - previous_loglikelihoods)
-    log_sum_weights = logsumexp(log_weights)
-    normalized_log_weights = log_weights - log_sum_weights
+    normalized_log_weights = log_weights - np.max(log_weights)
     normalized_weights = np.exp(normalized_log_weights)
     weights = normalized_weights / np.sum(normalized_weights)
     return weights  # noqa: RET504
@@ -73,8 +73,7 @@ def _calculate_weights(beta_increment, log_likelihoods):
     np.ndarray: The calculated weights.
     """
     log_weights = beta_increment * log_likelihoods
-    log_sum_weights = logsumexp(log_weights)
-    normalized_log_weights = log_weights - log_sum_weights
+    normalized_log_weights = log_weights - np.max(log_weights)
     normalized_weights = np.exp(normalized_log_weights)
     weights = normalized_weights / np.sum(normalized_weights)
     return weights  # noqa: RET504
@@ -100,7 +99,7 @@ def _calculate_log_evidence(beta_increment, log_likelihoods):
 
 def _increment_beta(log_likelihoods, beta, threshold_cov=1):
     """
-    Increment the beta value based on the coefficient of variation of weights.
+    Attempt to increment beta using optimization. If optimization fails, fall back to trial-and-error.
 
     Args:
         log_likelihoods (np.ndarray): The log-likelihood values.
@@ -111,15 +110,42 @@
     -------
     float: The new beta value.
     """
-    if beta >= 1:
-        return 1
+
+    def cov_objective(beta_increment):
+        weights = _calculate_weights(beta_increment, log_likelihoods)
+        cov_weights = np.nanstd(weights) / np.nanmean(weights)
+        return cov_weights - threshold_cov
+
+    # print(f'{cov_objective(0) = }, {cov_objective(1 - beta) = }')
+    # Check if optimization method is feasible
+    if np.sign(cov_objective(0)) == np.sign(cov_objective(1 - beta)):
+        # If signs are the same, set beta to its maximum possible value of 1
+        # print('Optimization not feasible. Setting beta to 1.0')
+        # wts = _calculate_weights(1 - beta, log_likelihoods)
+        # print(f'cov_weights at new beta = {np.nanstd(wts) / np.nanmean(wts)}')
+        return 1.0
+
+    # Try optimization method first
+    result = root_scalar(cov_objective, bracket=[0, 1 - beta], method='bisect')
+
+    if result.converged:
+        # If optimization succeeds, calculate the new beta
+        new_beta = min(beta + result.root, 1)
+        # wts = _calculate_weights(result.root, log_likelihoods)
+        # print(f'cov_weights at new beta = {np.nanstd(wts) / np.nanmean(wts)}')
+        return new_beta  # noqa: RET504
+
+    # Fallback to trial-and-error approach if optimization fails
+    # print('Optimization failed. Fallback to trial-and-error approach.')
     beta_increment = 1 - beta
     weights = _calculate_weights(beta_increment, log_likelihoods)
     cov_weights = np.nanstd(weights) / np.nanmean(weights)
+
     while cov_weights > threshold_cov:
         beta_increment = 0.99 * beta_increment
         weights = _calculate_weights(beta_increment, log_likelihoods)
         cov_weights = np.nanstd(weights) / np.nanmean(weights)
+
     proposed_beta = beta + beta_increment
     new_beta = min(proposed_beta, 1)
     return new_beta  # noqa: RET504
@@ -164,7 +190,7 @@
         threshold_cov=1,
         num_steps=1,
         thinning_factor=10,
-        adapt_frequency=100,
+        adapt_frequency=50,
     ):
         """
         Initialize the TMCMC class.
@@ -185,7 +211,11 @@
         self.thinning_factor = thinning_factor
         self.adapt_frequency = adapt_frequency
 
-    def _run_one_stage(
+    @staticmethod
+    def _generate_error_message(size):
+        return f'Expected a single value, but got {size} values.'
+
+    def _run_one_stage(  # noqa: C901
         self,
         samples,
         log_likelihoods,
@@ -219,6 +249,7 @@
         -------
         tuple: A tuple containing the new samples, new log-likelihoods, new log-target density values, new beta, and log evidence.
         """
+        # new_beta = _increment_beta(log_likelihoods, beta)
        new_beta = _increment_beta(log_likelihoods, beta)
         log_evidence = _calculate_log_evidence(new_beta - beta, log_likelihoods)
         weights = _calculate_weights(new_beta - beta, log_likelihoods)
@@ -226,6 +257,13 @@
         proposal_covariance = _get_scaled_proposal_covariance(
             samples, weights, scale_factor
         )
+        try:
+            cholesky_lower_triangular_matrix = np.linalg.cholesky(
+                proposal_covariance
+            )
+        except np.linalg.LinAlgError as exc:
+            msg = f'Cholesky decomposition failed: {exc}'
+            raise RuntimeError(msg) from exc
 
         new_samples = np.zeros_like(samples)
         new_log_likelihoods = np.zeros_like(log_likelihoods)
@@ -239,13 +277,15 @@
         num_accepts = 0
         n_adapt = 1
         step_count = 0
+        num_steps = self.num_steps
+        # print(f'{new_beta = }, {do_thinning = }, {num_steps = }')
         for k in range(burn_in_steps + num_samples):
-            # print(f'{k=}')
             index = rng.choice(num_samples, p=weights.flatten())
             if k >= burn_in_steps:
                 if new_beta == 1 or do_thinning:
-                    self.num_steps = self.num_steps * self.thinning_factor
-            for _ in range(self.num_steps):
+                    num_steps = self.num_steps * self.thinning_factor
+                    # print(f'{new_beta = }, {do_thinning = }, {num_steps = }')
+            for _ in range(num_steps):
                 step_count += 1
                 if step_count % self.adapt_frequency == 0:
                     acceptance_rate = num_accepts / self.adapt_frequency
@@ -258,10 +298,18 @@
                     proposal_covariance = _get_scaled_proposal_covariance(
                         current_samples, weights, scale_factor
                     )
+                    cholesky_lower_triangular_matrix = np.linalg.cholesky(
+                        proposal_covariance
+                    )
 
-                proposed_state = rng.multivariate_normal(
-                    current_samples[index, :], proposal_covariance
+                standard_normal_samples = rng.standard_normal(
+                    size=current_samples.shape[1]
+                )
+                proposed_state = (
+                    current_samples[index, :]
+                    + cholesky_lower_triangular_matrix @ standard_normal_samples
                 ).reshape(1, -1)
+
                 log_likelihood_at_proposed_state = log_likelihood_function(
                     proposed_state
                 )
@@ -277,9 +325,25 @@
                 if accept:
                     num_accepts += 1
                     current_samples[index, :] = proposed_state
-                    current_log_likelihoods[index] = log_likelihood_at_proposed_state
+                    # current_log_likelihoods[index] = log_likelihood_at_proposed_state
+                    if log_likelihood_at_proposed_state.size != 1:
+                        msg = self._generate_error_message(
+                            log_likelihood_at_proposed_state.size
+                        )
+                        raise ValueError(msg)
+                    current_log_likelihoods[index] = (
+                        log_likelihood_at_proposed_state.item()
+                    )
+                    # current_log_target_density_values[index] = (
+                    #     log_target_density_at_proposed_state
+                    # )
+                    if log_target_density_at_proposed_state.size != 1:
+                        msg = self._generate_error_message(
+                            log_target_density_at_proposed_state.size
+                        )
+                        raise ValueError(msg)
                     current_log_target_density_values[index] = (
-                        log_target_density_at_proposed_state
+                        log_target_density_at_proposed_state.item()
                     )
                 if k >= burn_in_steps:
                     weights = _calculate_weights(
@@ -334,6 +398,7 @@ def run(
         self.scale_factor = 2.4 / np.sqrt(self.num_dimensions)
         while betas_dict[stage_num] < 1:
             # print(f'Stage {stage_num}')
+            # print(f'Beta: {betas_dict[stage_num]}')
             (
                 new_samples,
                 new_log_likelihoods,
@@ -367,3 +432,72 @@ def run(
             log_target_density_values_dict,
             log_evidence_dict,
         )
+
+
+if __name__ == '__main__':
+    import numpy as np
+    from scipy import stats
+
+    # Define log-likelihood function (2D Gaussian)
+    def _log_likelihood_approximation_function(samples):
+        # Assuming a Gaussian log-likelihood with mean (10, 10) and identity covariance
+        return -0.5 * np.sum((samples - 10) ** 2, axis=1)
+
+    # Define log-posterior function (assume same as log-likelihood for this simple case)
+    def _log_target_density_approximation_function(samples, log_likelihoods):  # noqa: ARG001
+        return log_likelihoods  # In this simple case, they are the same
+
+    # Initialize the TMCMC sampler
+    tmcmc_sampler = TMCMC(
+        _log_likelihood_approximation_function,
+        _log_target_density_approximation_function,
+        threshold_cov=1,
+        num_steps=1,
+        thinning_factor=5,
+        adapt_frequency=50,
+    )
+
+    # Initial parameters
+    num_samples = 2000  # Number of samples
+    num_dimensions = 2  # Dimensionality of the target distribution
+    rng = np.random.default_rng(42)  # Random number generator
+
+    # Start with some random samples
+    initial_samples = rng.normal(size=(num_samples, num_dimensions))
+    initial_log_likelihoods = _log_likelihood_approximation_function(initial_samples)
+    initial_log_target_density_values = initial_log_likelihoods
+
+    # Dictionaries to store results for each stage
+    samples_dict = {0: initial_samples}
+    betas_dict = {0: 0.0}  # Start with beta=0 (prior importance)
+    log_likelihoods_dict = {0: initial_log_likelihoods}
+    log_target_density_values_dict = {0: initial_log_target_density_values}
+    log_evidence_dict = {0: 0}
+
+    # Run TMCMC
+    stage_num = 0
+    (
+        samples_dict,
+        betas_dict,
+        log_likelihoods_dict,
+        log_target_density_values_dict,
+        log_evidence_dict,
+    ) = tmcmc_sampler.run(
+        samples_dict,
+        betas_dict,
+        log_likelihoods_dict,
+        log_target_density_values_dict,
+        log_evidence_dict,
+        rng,
+        stage_num,
+        num_burn_in=100,
+    )
+
+    # Display results
+    final_stage_num = max(samples_dict.keys())
+    print(  # noqa: T201
+        f'Final samples (stage {final_stage_num}): \n{samples_dict[final_stage_num]}'
+    )
+    print(f'Betas: {betas_dict.values()}')  # noqa: T201
+    print(f'Log-evidence values: {log_evidence_dict.values()}')  # noqa: T201
+    print(f'Total log-evidence: {sum(log_evidence_dict.values())}')  # noqa: T201
diff --git a/modules/performUQ/common/uq_utilities.py b/modules/performUQ/common/uq_utilities.py
index fb19ca665..938944a53 100644
--- a/modules/performUQ/common/uq_utilities.py
+++ b/modules/performUQ/common/uq_utilities.py
@@ -20,7 +20,7 @@
 import quoFEM_RV_models
 
 
-def _copytree(src, dst, symlinks=False, ignore=None):  # noqa: FBT002
+def _copytree(src, dst, symlinks=False, ignore=None) -> str:  # noqa: FBT002
     if not os.path.exists(dst):  # noqa: PTH110
         os.makedirs(dst)  # noqa: PTH103
     for item in os.listdir(src):
@@ -41,7 +41,7 @@ def _copytree(src, dst, symlinks=False, ignore=None):  # noqa: FBT002
     return '0'
 
 
-def _append_msg_in_out_file(msg, out_file_name: str = 'ops.out'):
+def _append_msg_in_out_file(msg, out_file_name: str = 'ops.out') -> str:
     if glob.glob(out_file_name):  # noqa: PTH207
         with open(out_file_name) as text_file:  # noqa: PTH123
             error_FEM = text_file.read()  # noqa: N806
@@ -93,21 +93,13 @@ def __init__(
 
     def _check_size_of_sample(self, sample_values: NDArray) -> None:
         num_samples = len(sample_values)
         if num_samples > 1:
-            msg = (
-                f'Do one simulation at a time. There were {num_samples} '
-                ' samples provided in the sample value'
-                f' {sample_values}.'
-            )
+            msg = f'Do one simulation at a time. There were {num_samples} samples provided in the sample value {sample_values}.'
             raise ModelEvaluationError(msg)
         for i in range(num_samples):
             num_values_in_each_sample = len(sample_values[i])
             if num_values_in_each_sample != self.num_rv:
-                msg = (
-                    f'Expected {self.num_rv} values in each sample, found '
-                    f' {num_values_in_each_sample} in'
-                    f' {sample_values}.'
-                )
+                msg = f'Expected {self.num_rv} values in each sample, found {num_values_in_each_sample} in {sample_values}.'
                 raise ModelEvaluationError(msg)
 
     def _create_workdir(self, simulation_number: int) -> str:
@@ -117,22 +109,19 @@ def _create_workdir(self, simulation_number: int) -> str:
         )
         if os.path.exists(workdir):  # noqa: PTH110
             for root, dirs, files in os.walk(workdir):
-                for file in files:
-                    try:
+                try:
+                    for file in files:
                         os.chmod(os.path.join(root, file), 0o777)  # noqa: S103, PTH101, PTH118
                         os.unlink(os.path.join(root, file))  # noqa: PTH108, PTH118
-                    except:  # noqa: PERF203, E722
-                        msg = f'Could not remove file {file} from {workdir}.'
-                        raise ModelEvaluationError(msg)  # noqa: B904
-                for dir in dirs:  # noqa: A001
-                    try:
-                        shutil.rmtree(os.path.join(root, dir))  # noqa: PTH118
-                    except:  # noqa: PERF203, E722
-                        msg = (
-                            f'Could not remove directory {dir} '
-                            f' from {workdir}.'
-                        )
-                        raise ModelEvaluationError(msg)  # noqa: B904
+                except Exception as ex:
+                    msg = f'Could not remove file {file} from {workdir}.'
+                    raise ModelEvaluationError(msg) from ex
+                try:
+                    for directory in dirs:
+                        shutil.rmtree(os.path.join(root, directory))  # noqa: PTH118
+                except Exception as ex:
+                    msg = f'Could not remove directory {directory} from {workdir}.'
+                    raise ModelEvaluationError(msg) from ex
 
         for src_dir in self.list_of_dir_names_to_copy_files_from:
             src = os.path.join(self.full_path_of_tmpSimCenter_dir, src_dir)  # noqa: PTH118
@@ -149,27 +138,21 @@ def _create_params_file(self, sample_values: NDArray, workdir: str) -> None:
         try:
             with open(os.path.join(workdir, 'params.in'), 'w') as f:  # noqa: PTH118, PTH123
                 f.write('\n'.join(list_of_strings_to_write))
-        except Exception as ex:  # noqa: BLE001
-            raise ModelEvaluationError(  # noqa: B904, TRY003
-                'Failed to create params.in file in '  # noqa: EM102
-                f' {workdir}. The following error occurred: \n{ex}'
-            )
+        except Exception as exc:
+            msg = f'Failed to create params.in file in {workdir}. The following error occurred: \n{exc}'
+            raise ModelEvaluationError(msg) from exc
 
     def _execute_driver_file(self, workdir: str) -> None:
         command = (
-            f'{os.path.join(workdir, self.driver_filename)} '  # noqa: PTH118
-            ' 1> model_eval.log 2>&1'
+            f'{os.path.join(workdir, self.driver_filename)} 1> model_eval.log 2>&1'  # noqa: PTH118
         )
         os.chdir(workdir)
         completed_process = subprocess.run(command, shell=True, check=False)  # noqa: S602
         try:
             completed_process.check_returncode()
         except subprocess.CalledProcessError as ex:
-            returnStringList = ['Failed to run the model.']  # noqa: N806
-            returnStringList.append(
-                'The command to run the model was '
-                f' {ex.cmd}'
-            )
+            returnStringList = [f'Failed to run the model in {workdir}.']  # noqa: N806
+            returnStringList.append(f'The command to run the model was {ex.cmd}')
             returnStringList.append(f'The return code was {ex.returncode}')
             returnStringList.append(f'The following error occurred: \n{ex}')
             raise ModelEvaluationError('\n\n'.join(returnStringList))  # noqa: B904
@@ -178,27 +161,23 @@ def _read_outputs_from_results_file(self, workdir: str) -> NDArray:
         if glob.glob('results.out'):  # noqa: PTH207
             outputs = np.loadtxt('results.out', dtype=float).flatten()
         else:
-            msg = f"Error running FEM: 'results.out' missing at {workdir}\n"
+            msg = f"Error running FEM: 'results.out' missing in {workdir}\n"
             msg = _append_msg_in_out_file(msg, out_file_name='ops.out')
             raise ModelEvaluationError(msg)
 
         if outputs.shape[0] == 0:
-            msg = "Error running FEM: 'results.out' is empty\n"
+            msg = f"Error running FEM: in {workdir}, 'results.out' is empty\n"
             msg = _append_msg_in_out_file(msg, out_file_name='ops.out')
             raise ModelEvaluationError(msg)
 
         if outputs.shape[0] != self.length_of_results:
-            msg = (
-                "Error running FEM: 'results.out' contains "
-                f' {outputs.shape[0]} values, expected to get '
-                f' {self.length_of_results} values\n'
-            )
+            msg = f"Error running FEM: in {workdir}, 'results.out' contains {outputs.shape[0]} values, expected to get {self.length_of_results} values\n"
            msg = _append_msg_in_out_file(msg, out_file_name='ops.out')
             raise ModelEvaluationError(msg)
 
         if not self.ignore_nans:
             if np.isnan(np.sum(outputs)):
-                msg = f'Error running FEM: Response value in {workdir} is NaN'
+                msg = f"Error running FEM: 'results.out' in {workdir} contains NaN"
                 raise ModelEvaluationError(msg)
 
         return outputs
@@ -214,15 +193,22 @@ def evaluate_model_once(  # noqa: D102
             self._create_params_file(sample_values, workdir)
             self._execute_driver_file(workdir)
             outputs = self._read_outputs_from_results_file(workdir)
-        except Exception:  # noqa: BLE001
+        except ModelEvaluationError as model_err:
+            # Custom error handling for ModelEvaluationError
+            msg = f'\nIn workdir: {workdir}\n' f'Error: {model_err!s}\n'
+            raise ModelEvaluationError(msg) from model_err
+        except Exception as exc:
+            # Catch all other exceptions, show full traceback and message
             exc_type, exc_value, exc_traceback = sys.exc_info()
-            outputs = (
-                f'\nSimulation number: {simulation_number}\n'  # noqa: ISC003
-                + f'Samples values: {sample_values}\n'
-            )
-            outputs += ''.join(
+            formatted_traceback = ''.join(
                 traceback.format_exception(exc_type, exc_value, exc_traceback)
             )
+            msg = (
+                f'\nIn workdir: {workdir}\n'
+                f'Encountered an error:\n{exc}\n\n'
+                f'Traceback:\n{formatted_traceback}'
+            )
+            raise RuntimeError(msg) from exc  # Chain RuntimeError with traceback
         finally:
             os.chdir(self.full_path_of_tmpSimCenter_dir)
         return outputs
@@ -236,17 +222,16 @@ def __init__(self, run_type: str = 'runningLocal') -> None:
 
     def get_num_processors(self) -> int:  # noqa: D102
         num_processors = os.cpu_count()
+        max_num_processors = 32  # max number of processors to use in multiprocessing when running locally
         if num_processors is None:
             num_processors = 1
         elif num_processors < 1:
-            raise ValueError(  # noqa: TRY003
-                'Number of processes must be at least 1. '  # noqa: EM102
-                f' Got {num_processors}'
-            )
-        elif num_processors > 32:
+            msg = f'Number of processes must be at least 1. Got {num_processors}.'
+            raise ValueError(msg)
+        elif num_processors > max_num_processors:
             # this is to get past memory problems when running large number processors in a container
             num_processors = 8
-
+
         return num_processors
 
     def get_pool(self) -> Pool:  # noqa: D102
@@ -262,12 +247,22 @@ def make_ERADist_object(name, opt, val) -> ERADist:  # noqa: N802, D103
 
 
 def create_one_marginal_distribution(rv_data) -> ERADist:  # noqa: D103
-    string = (
-        f'quoFEM_RV_models.{rv_data["distribution"]}'  # noqa: ISC003
-        + f'{rv_data["inputType"]}.model_validate({rv_data})'
-    )
-    rv = eval(string)  # noqa: S307
-    return make_ERADist_object(name=rv.ERAName, opt=rv.ERAOpt, val=rv.ERAVal)
+    try:
+        # Access the distribution and input type dynamically without eval
+        distribution_model = getattr(
+            quoFEM_RV_models, rv_data['distribution'] + rv_data['inputType']
+        )
+        rv = distribution_model.model_validate(rv_data)
+
+        # Create the ERADist object
+        return make_ERADist_object(name=rv.ERAName, opt=rv.ERAOpt, val=rv.ERAVal)
+
+    except AttributeError as e:
+        msg = f'Invalid distribution or input type: {e}'
+        raise AttributeError(msg) from e
+    except TypeError as e:
+        msg = f'model_validate does not accept the provided rv_data: {e}'
+        raise TypeError(msg) from e
 
 
 def make_list_of_marginal_distributions(  # noqa: D103
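The changes in UCSD_UQ.py and mainscript.py above combine three standard pieces: result.check_returncode() raises subprocess.CalledProcessError when the driver exits with a non-zero code, the except branch appends the captured stderr (or a full traceback.format_exc() dump) to the .err file, and the else branch runs only when no exception was raised. A minimal self-contained sketch of that combination; the file name and command below are made up for illustration and are not the driver's actual values:

import subprocess
import traceback
from pathlib import Path

err_file = Path('illustration.err')  # hypothetical error file, not the workflow's actual path

try:
    result = subprocess.run(
        ['python', '-c', 'raise SystemExit(3)'],  # toy command that exits with code 3
        capture_output=True,
        text=True,
        check=False,
    )
    result.check_returncode()  # raises CalledProcessError because the return code is non-zero
except subprocess.CalledProcessError:
    with err_file.open('a') as f:
        f.write(f'ERROR: {result.stderr}\n')
        f.write(traceback.format_exc())  # full traceback, in the spirit of the mainscript.py change
else:
    print('SUCCESS')  # reached only when check_returncode() did not raise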
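The reworked _calculate_weights helpers in tmcmc.py subtract the maximum log-weight before exponentiating instead of subtracting logsumexp; both give the same weights after the final division by the sum, but the max-shift keeps every intermediate exp() at or below 1. A standalone sketch of the idea, with illustrative names not taken from the module:

import numpy as np
from scipy.special import logsumexp

log_weights = np.array([1000.0, 1001.0, 1003.0])  # a naive np.exp() would overflow here

# Max-shift normalization, as in the updated helpers: the largest term becomes exp(0) = 1.
shifted = np.exp(log_weights - np.max(log_weights))
weights_max_shift = shifted / np.sum(shifted)

# logsumexp-based normalization yields the same normalized weights.
weights_logsumexp = np.exp(log_weights - logsumexp(log_weights))

print(weights_max_shift, weights_logsumexp)  # identical up to floating-point error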
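The new _increment_beta treats the choice of the next tempering exponent as a root-finding problem: the coefficient of variation of the weights minus the threshold changes sign somewhere on the bracket [0, 1 - beta], and scipy.optimize.root_scalar with bisection locates that point; when the sign check fails, the code jumps straight to beta = 1 or falls back to the earlier trial-and-error shrinkage. A self-contained sketch of the same idea under those assumptions, using a toy log-likelihood vector and illustrative names:

import numpy as np
from scipy.optimize import root_scalar


def next_beta(log_likelihoods, beta, threshold_cov=1.0):
    """Pick the next tempering exponent so the weight COV roughly equals threshold_cov."""

    def cov_objective(beta_increment):
        shifted = beta_increment * log_likelihoods
        weights = np.exp(shifted - np.max(shifted))
        weights = weights / np.sum(weights)
        return np.nanstd(weights) / np.nanmean(weights) - threshold_cov

    # No sign change on the bracket: the COV never reaches the threshold, so finish at beta = 1.
    if np.sign(cov_objective(0.0)) == np.sign(cov_objective(1.0 - beta)):
        return 1.0

    result = root_scalar(cov_objective, bracket=[0.0, 1.0 - beta], method='bisect')
    return min(beta + result.root, 1.0)


rng = np.random.default_rng(0)
print(next_beta(rng.normal(size=500), beta=0.0))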
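Inside _run_one_stage, proposals are now drawn as current + L @ z, where L is a Cholesky factor of the scaled proposal covariance computed once (and again after each adaptation) rather than calling rng.multivariate_normal on every step: if C = L L^T and z ~ N(0, I), then mean + L z is distributed as N(mean, C). A short sketch that checks this empirically; the mean and covariance values are arbitrary:

import numpy as np

rng = np.random.default_rng(123)
mean = np.array([1.0, -2.0])
covariance = np.array([[2.0, 0.6],
                       [0.6, 1.0]])

# Factor once; np.linalg.cholesky raises LinAlgError if the matrix is not positive definite,
# which is what the new try/except around the decomposition guards against.
lower = np.linalg.cholesky(covariance)

draws = np.array(
    [mean + lower @ rng.standard_normal(mean.size) for _ in range(20000)]
)
print(np.cov(draws, rowvar=False))  # close to the covariance matrix above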
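The rewrite of create_one_marginal_distribution in uq_utilities.py replaces string building plus eval() with a getattr() lookup on the quoFEM_RV_models module, so an unknown distribution or input type surfaces as an AttributeError instead of arbitrary code evaluation. A generic sketch of the same lookup pattern, using scipy.stats as a stand-in module and a hypothetical input record:

from scipy import stats

rv_data = {'distribution': 'norm', 'loc': 0.0, 'scale': 2.0}  # illustrative record only

try:
    # Dynamic attribute lookup instead of eval() on a constructed string.
    distribution_family = getattr(stats, rv_data['distribution'])
except AttributeError as e:
    msg = f"Invalid distribution name: {rv_data['distribution']}"
    raise AttributeError(msg) from e

rv = distribution_family(loc=rv_data['loc'], scale=rv_data['scale'])
print(rv.mean(), rv.std())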