diff --git a/.gitignore b/.gitignore
index 8b5c0532..dc307b1d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,5 @@
+brian2cuda/_version.py
+
 # Compiled Python files
 *.py[cod]
 
@@ -30,6 +32,9 @@ nosetests.xml
 # pycharm project files
 .idea
 
+# VisualStudio Code
+.vscode/
+
 # egg-info files
 /*.egg-info
 
diff --git a/brian2cuda/__init__.py b/brian2cuda/__init__.py
index 50cd759e..fdaa67b8 100644
--- a/brian2cuda/__init__.py
+++ b/brian2cuda/__init__.py
@@ -1,6 +1,7 @@
 """
 Package implementing the CUDA "standalone" `Device` and `CodeObject`.
 """
+import logging
 
 from . import cuda_prefs
 from .codeobject import CUDAStandaloneCodeObject
diff --git a/brian2cuda/cuda_generator.py b/brian2cuda/cuda_generator.py
index 0115569f..a7d39e1a 100644
--- a/brian2cuda/cuda_generator.py
+++ b/brian2cuda/cuda_generator.py
@@ -21,7 +21,7 @@
 
 __all__ = ['CUDACodeGenerator', 'CUDAAtomicsCodeGenerator', 'c_data_type']
 
-logger = get_logger('brian2.codegen.generators.cuda_generator')
+logger = get_logger(__name__)
 
 
 class ParallelisationError(Exception):
diff --git a/brian2cuda/cuda_prefs.py b/brian2cuda/cuda_prefs.py
index 7bcb3998..62b20e5d 100644
--- a/brian2cuda/cuda_prefs.py
+++ b/brian2cuda/cuda_prefs.py
@@ -8,7 +8,7 @@
 from brian2.utils.logger import get_logger
 
-logger = get_logger('brian2.devices.cuda_standalone.cuda_prefs')
+logger = get_logger(__name__)
 
 
 def validate_bundle_size_expression(string):
     known_vars = ['mean', 'std', 'max', 'min']
diff --git a/brian2cuda/device.py b/brian2cuda/device.py
index 579d396d..97c87c04 100644
--- a/brian2cuda/device.py
+++ b/brian2cuda/device.py
@@ -40,7 +40,7 @@
 
 __all__ = []
 
-logger = get_logger('brian2.devices.cuda_standalone')
+logger = get_logger(__name__)
 
 
 class CUDAWriter(CPPWriter):
@@ -433,7 +433,15 @@ def check_openmp_compatible(self, nb_threads):
         if nb_threads > 0:
             raise NotImplementedError("Using OpenMP in a CUDA standalone project is not supported")
 
-    def generate_objects_source(self, writer, arange_arrays, synapses, static_array_specs, networks):
+    def generate_objects_source(
+        self,
+        writer,
+        arange_arrays,
+        synapses,
+        static_array_specs,
+        networks,
+        timed_arrays,
+    ):
         sm_multiplier = prefs.devices.cuda_standalone.SM_multiplier
         num_parallel_blocks = prefs.devices.cuda_standalone.parallel_blocks
         curand_generator_type = prefs.devices.cuda_standalone.random_number_generator_type
@@ -443,7 +451,7 @@ def generate_objects_source(self, writer, arange_arrays, synapses, static_array_
         for var, varname in self.arrays.items():
             if var.name.endswith('space'):  # get all eventspace variables
                 self.eventspace_arrays[var] = varname
-                #if hasattr(var, 'owner') and isinstance(v.owner, Clock):
+                # if hasattr(var, 'owner') and isinstance(v.owner, Clock):
                 if isinstance(var.owner, SpikeGeneratorGroup):
                     self.spikegenerator_eventspaces.append(varname)
         for var in self.eventspace_arrays.keys():
@@ -474,6 +482,7 @@ def generate_objects_source(self, writer, arange_arrays, synapses, static_array_
             networks=networks,
             code_objects=self.code_objects.values(),
             get_array_filename=self.get_array_filename,
+            get_array_name=self.get_array_name,
             all_codeobj_with_host_rng=self.codeobjects_with_rng["host_api"]["all_runs"],
             sm_multiplier=sm_multiplier,
             num_parallel_blocks=num_parallel_blocks,
@@ -487,6 +496,7 @@ def generate_objects_source(self, writer, arange_arrays, synapses, static_array_
             profile_statemonitor_copy_to_host=profile_statemonitor_copy_to_host,
             profile_statemonitor_vars=profile_statemonitor_vars,
             subgroups_with_spikemonitor=sorted(subgroups_with_spikemonitor),
+            timed_arrays=timed_arrays,
             variables_on_host_only=self.variables_on_host_only)
         # Reinsert deleted entries, in case we use self.arrays later? maybe unnecassary...
         self.arrays.update(self.eventspace_arrays)
@@ -1227,7 +1237,7 @@ def generate_makefile(self, writer, cpp_compiler, cpp_compiler_flags, cpp_linker
         )
         writer.write('makefile', makefile_tmp)
 
-    def build(self, directory='output',
+    def build(self, directory='output', results_directory="results",
               compile=True, run=True, debug=False, clean=False,
               with_output=True, disable_asserts=False,
               additional_source_files=None,
@@ -1307,6 +1317,15 @@ def build(self, directory='output',
             directory = tempfile.mkdtemp(prefix='brian_standalone_')
         self.project_dir = directory
         ensure_directory(directory)
+        if os.path.isabs(results_directory):
+            raise TypeError(
+                "The 'results_directory' argument needs to be a relative path but was "
+                f"'{results_directory}'."
+            )
+        # Translate path to absolute path which ends with /
+        self.results_dir = os.path.join(
+            os.path.abspath(os.path.join(directory, results_directory)), ""
+        )
 
         # Determine compiler flags and directories
         cpp_compiler, cpp_default_extra_compile_args = get_compiler_and_args()
@@ -1447,9 +1466,14 @@ def build(self, directory='output',
 
         self.generate_codeobj_source(self.writer)
 
-        self.generate_objects_source(self.writer, self.arange_arrays,
-                                     self.synapses, self.static_array_specs,
-                                     self.networks)
+        self.generate_objects_source(
+            self.writer,
+            self.arange_arrays,
+            self.synapses,
+            self.static_array_specs,
+            self.networks,
+            self.timed_arrays,
+        )
         self.generate_network_source(self.writer)
         self.generate_synapses_classes_source(self.writer)
         self.generate_run_source(self.writer)
@@ -1483,13 +1507,18 @@ def build(self, directory='output',
         if compile:
             self.compile_source(directory, cpp_compiler, debug, clean)
         if run:
-            self.run(directory, with_output, run_args)
+            self.run(
+                directory=directory,
+                results_directory=results_directory,
+                with_output=with_output,
+                run_args=run_args,
+            )
 
     def network_run(self, net, duration, report=None, report_period=10*second,
                     namespace=None, profile=False, level=0, **kwds):
-        ###################################################
-        ### This part is copied from CPPStandaoneDevice ###
-        ###################################################
+        ####################################################
+        ### This part is copied from CPPStandaloneDevice ###
+        ####################################################
         self.networks.add(net)
         if kwds:
             logger.warn(('Unsupported keyword argument(s) provided for run: '
@@ -1671,12 +1700,15 @@ def network_run(self, net, duration, report=None, report_period=10*second,
             if clock not in all_clocks:
                 run_lines.append(f'{net.name}.add(&{clock.name}, NULL);')
 
-        run_lines.extend(self.code_lines['before_network_run'])
-        # run everything that is run on a clock
+        run_lines.extend(self.code_lines["before_network_run"])
+        if not self.run_args_applied:
+            run_lines.append("set_from_command_line(args);")
+            self.run_args_applied = True
         run_lines.append(
-            f'{net.name}.run({float(duration)!r}, {report_call}, {float(report_period)!r});'
+            f"{net.name}.run({float(duration)!r}, {report_call},"
+            f" {float(report_period)!r});"
         )
-        run_lines.extend(self.code_lines['after_network_run'])
+        run_lines.extend(self.code_lines["after_network_run"])
         # for multiple runs, the random number buffer needs to be reset
         run_lines.append('random_number_buffer.run_finished();')
         # nvprof stuff
@@ -1758,7 +1790,6 @@ def network_restore(self, net, *args, **kwds):
                                   'supported in CUDA standalone'))
 
-
 def prepare_codeobj_code_for_rng(codeobj):
     '''
     Prepare a CodeObject for random number generation (RNG).
@@ -1964,7 +1995,7 @@ def prepare_codeobj_code_for_rng(codeobj):
     # If the codeobjec does not need curand states for poisson, check if it needs
     # them for binomial calls
     if not codeobj.needs_curand_states:
-        match = re.search('_binomial\w*\(const int vectorisation_idx\)', codeobj.code.cu_file)
+        match = re.search(r'_binomial\w*\(const int vectorisation_idx\)', codeobj.code.cu_file)
         if match is not None:
             codeobj.needs_curand_states = True
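Note: the `results_directory` handling added to `build()` above rejects absolute paths and normalizes the result to an absolute path ending in a separator (the generated C++ code simply concatenates file names onto it). A minimal Python sketch of that computation, using a hypothetical `resolve_results_dir` helper that is not part of the package:

    import os

    def resolve_results_dir(directory, results_directory="results"):
        # Same check as in build(): only paths relative to the project directory
        if os.path.isabs(results_directory):
            raise TypeError(
                "The 'results_directory' argument needs to be a relative path but was "
                f"'{results_directory}'."
            )
        # Joining with "" appends the trailing separator that the C++ side
        # relies on when it prepends results_dir to output file names
        return os.path.join(os.path.abspath(os.path.join(directory, results_directory)), "")

    print(resolve_results_dir("output"))           # e.g. /cwd/output/results/
    print(resolve_results_dir("output", "run_1"))  # e.g. /cwd/output/run_1/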
diff --git a/brian2cuda/templates/main.cu b/brian2cuda/templates/main.cu
index eb617c5b..5cb822f6 100644
--- a/brian2cuda/templates/main.cu
+++ b/brian2cuda/templates/main.cu
@@ -25,8 +25,28 @@
 
 {{report_func|autoindent}}
 
+void set_from_command_line(const std::vector<std::string> args)
+{
+    for (const auto& arg : args) {
+        // Split into two parts
+        size_t equal_sign = arg.find("=");
+        auto name = arg.substr(0, equal_sign);
+        auto value = arg.substr(equal_sign + 1, arg.length());
+        brian::set_variable_by_name(name, value);
+    }
+}
+
 int main(int argc, char **argv)
 {
+    std::vector<std::string> args(argv + 1, argv + argc);
+    if (args.size() >=2 && args[0] == "--results_dir")
+    {
+        brian::results_dir = args[1];
+        #ifdef DEBUG
+        std::cout << "Setting results dir to '" << brian::results_dir << "'" << std::endl;
+        #endif
+        args.erase(args.begin(), args.begin()+2);
+    }
     {{'\n'.join(code_lines['before_start'])|autoindent}}
 
     // seed variable set in Python through brian2.seed() calls can use this
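Note: with these changes, the compiled binary accepts an optional leading `--results_dir <path>` pair, and every remaining argument is treated as a `name=value` override that `set_from_command_line` forwards to `brian::set_variable_by_name`. A rough Python model of the argument handling (illustrative only; the authoritative logic is the C++ above):

    def parse_args(argv):
        args = list(argv)
        results_dir = "results/"  # default set in objects.cu
        if len(args) >= 2 and args[0] == "--results_dir":
            results_dir = args[1]
            del args[:2]
        overrides = []
        for arg in args:
            # arg.find("=") in the C++ splits at the first '=' only,
            # so values may themselves contain '='
            name, _, value = arg.partition("=")
            overrides.append((name, value))
        return results_dir, overrides

    print(parse_args(["--results_dir", "run_1", "neurongroup.v=0.5"]))
    # ('run_1', [('neurongroup.v', '0.5')])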
"True" : "False") << std::endl; + #endif + std::fill(var_pointer, var_pointer+size, value); +} + +template void set_variable_from_value(std::string varname, T* var_pointer, size_t size, T value) { + #ifdef DEBUG + std::cout << "Setting '" << varname << "' to " << value << std::endl; + #endif + std::fill(var_pointer, var_pointer+size, value); +} + +template void set_variable_from_file(std::string varname, T* var_pointer, size_t data_size, std::string filename) { + ifstream f; + streampos size; + #ifdef DEBUG + std::cout << "Setting '" << varname << "' from file '" << filename << "'" << std::endl; + #endif + f.open(filename, ios::in | ios::binary | ios::ate); + size = f.tellg(); + if (size != data_size) { + std::cerr << "Error reading '" << filename << "': file size " << size << " does not match expected size " << data_size << std::endl; + return; + } + f.seekg(0, ios::beg); + if (f.is_open()) + f.read(reinterpret_cast(var_pointer), data_size); + else + std::cerr << "Could not read '" << filename << "'" << std::endl; + if (f.fail()) + std::cerr << "Error reading '" << filename << "'" << std::endl; +} + +//////////////// set arrays by name /////// +void brian::set_variable_by_name(std::string name, std::string s_value) { + size_t var_size; + size_t data_size; + std::for_each(s_value.begin(), s_value.end(), [](char& c) // modify in-place + { + c = std::tolower(static_cast(c)); + }); + if (s_value == "true") + s_value = "1"; + else if (s_value == "false") + s_value = "0"; + // non-dynamic arrays + {% for var, varname in array_specs | dictsort(by='value') %} + {% if not var in dynamic_array_specs and not var.read_only %} + if (name == "{{var.owner.name}}.{{var.name}}") { + var_size = {{var.size}}; + data_size = {{var.size}}*sizeof({{c_data_type(var.dtype)}}); + if (s_value[0] == '-' || (s_value[0] >= '0' && s_value[0] <= '9')) { + // set from single value + {{ set_from_value(var.dtype, "brian::" + get_array_name(var)) }} + } else { + // set from file + set_variable_from_file(name, brian::{{get_array_name(var)}}, data_size, s_value); + } + {% if get_array_name(var) not in variables_on_host_only %} + // copy to device + CUDA_SAFE_CALL( + cudaMemcpy( + brian::dev{{get_array_name(var)}}, + &brian::{{get_array_name(var)}}[0], + sizeof(brian::{{get_array_name(var)}}[0])*brian::_num_{{get_array_name(var)}}, + cudaMemcpyHostToDevice + ) + ); + {% endif %} + return; + } + {% endif %} + {% endfor %} + // dynamic arrays (1d) + {% for var, varname in dynamic_array_specs | dictsort(by='value') %} + {% if not var.read_only %} + if (name == "{{var.owner.name}}.{{var.name}}") { + var_size = brian::{{get_array_name(var, access_data=False)}}.size(); + data_size = var_size*sizeof({{c_data_type(var.dtype)}}); + if (s_value[0] == '-' || (s_value[0] >= '0' && s_value[0] <= '9')) { + // set from single value + {{ set_from_value(var.dtype, "&brian::" + get_array_name(var, False) + "[0]") }} + } else { + // set from file + set_variable_from_file(name, &brian::{{get_array_name(var, False)}}[0], data_size, s_value); + } + {% if get_array_name(var) not in variables_on_host_only %} + // copy to device + CUDA_SAFE_CALL( + cudaMemcpy( + thrust::raw_pointer_cast(&brian::dev{{get_array_name(var, False)}}[0]), + &brian::{{get_array_name(var, False)}}[0], + sizeof(brian::{{get_array_name(var, False)}}[0])*brian::{{get_array_name(var, False)}}.size(), + cudaMemcpyHostToDevice + ) + ); + {% endif %} + return; + } + {% endif %} + {% endfor %} + {% for var, varname in timed_arrays | dictsort(by='value') %} + if (name == 
"{{varname}}.values") { + var_size = {{var.values.size}}; + data_size = var_size*sizeof({{c_data_type(var.values.dtype)}}); + if (s_value[0] == '-' || (s_value[0] >= '0' && s_value[0] <= '9')) { + // set from single value + {{ set_from_value(var.values.dtype, "brian::" + varname + "_values") }} + + } else { + // set from file + set_variable_from_file(name, brian::{{varname}}_values, data_size, s_value); + } + {% if varname + "_values" not in variables_on_host_only %} + // copy to device + CUDA_SAFE_CALL( + cudaMemcpy( + brian::dev{{varname}}_values, + &brian::{{varname}}_values[0], + data_size, + cudaMemcpyHostToDevice + ) + ); + {% endif %} + return; + } + {% endfor %} + std::cerr << "Cannot set unknown variable '" << name << "'." << std::endl; + exit(1); +} //////////////// arrays /////////////////// {% for var, varname in array_specs | dictsort(by='value') %} {% if not var in dynamic_array_specs %} @@ -382,7 +529,7 @@ void _write_arrays() ); {% endif %} ofstream outfile_{{varname}}; - outfile_{{varname}}.open("{{get_array_filename(var) | replace('\\', '\\\\')}}", ios::binary | ios::out); + outfile_{{varname}}.open(results_dir + "{{get_array_filename(var) | replace('\\', '\\\\')}}", ios::binary | ios::out); if(outfile_{{varname}}.is_open()) { outfile_{{varname}}.write(reinterpret_cast({{varname}}), {{var.size}}*sizeof({{c_data_type(var.dtype)}})); @@ -399,7 +546,7 @@ void _write_arrays() {{varname}} = dev{{varname}}; {% endif %} ofstream outfile_{{varname}}; - outfile_{{varname}}.open("{{get_array_filename(var) | replace('\\', '\\\\')}}", ios::binary | ios::out); + outfile_{{varname}}.open(results_dir + "{{get_array_filename(var) | replace('\\', '\\\\')}}", ios::binary | ios::out); if(outfile_{{varname}}.is_open()) { outfile_{{varname}}.write(reinterpret_cast(thrust::raw_pointer_cast(&{{varname}}[0])), {{varname}}.size()*sizeof({{c_data_type(var.dtype)}})); @@ -418,7 +565,7 @@ void _write_arrays() double copy_time_statemon; {% endif %} ofstream outfile_{{varname}}; - outfile_{{varname}}.open("{{get_array_filename(var) | replace('\\', '\\\\')}}", ios::binary | ios::out); + outfile_{{varname}}.open(results_dir + "{{get_array_filename(var) | replace('\\', '\\\\')}}", ios::binary | ios::out); if(outfile_{{varname}}.is_open()) { {% if var in profile_statemonitor_vars %} @@ -450,7 +597,7 @@ void _write_arrays() {% if profiled_codeobjects is defined and profiled_codeobjects %} // Write profiling info to disk ofstream outfile_profiling_info; - outfile_profiling_info.open("results/profiling_info.txt", ios::out); + outfile_profiling_info.open(results_dir + "profiling_info.txt", ios::out); if(outfile_profiling_info.is_open()) { {% for codeobj in profiled_codeobjects | sort %} @@ -476,7 +623,7 @@ void _write_arrays() {% endif %} // Write last run info to disk ofstream outfile_last_run_info; - outfile_last_run_info.open("results/last_run_info.txt", ios::out); + outfile_last_run_info.open(results_dir + "last_run_info.txt", ios::out); if(outfile_last_run_info.is_open()) { outfile_last_run_info << (Network::_last_run_time) << " " << (Network::_last_run_completed_fraction) << std::endl; @@ -600,6 +747,7 @@ typedef {{curand_float_type}} randomNumber_t; // random number type namespace brian { extern size_t used_device_memory; +extern std::string results_dir; //////////////// clocks /////////////////// {% for clock in clocks %} @@ -611,6 +759,8 @@ extern Clock {{clock.name}}; extern Network {{net.name}}; {% endfor %} +extern void set_variable_by_name(std::string, std::string); + //////////////// dynamic 
diff --git a/brian2cuda/tests/features/cuda_configuration.py b/brian2cuda/tests/features/cuda_configuration.py
index 77bd51e5..a4b7d551 100644
--- a/brian2cuda/tests/features/cuda_configuration.py
+++ b/brian2cuda/tests/features/cuda_configuration.py
@@ -17,7 +17,7 @@
 except ImportError:
     pass
 
-logger = get_logger('brian2.devices.cuda_standalone.cuda_configuration')
+logger = get_logger(__name__)
 
 __all__ = ['CUDAStandaloneConfiguration']
 
diff --git a/brian2cuda/tests/test_cuda_generator.py b/brian2cuda/tests/test_cuda_generator.py
index f6bd231d..c3b1a9ca 100644
--- a/brian2cuda/tests/test_cuda_generator.py
+++ b/brian2cuda/tests/test_cuda_generator.py
@@ -1,10 +1,12 @@
+import functools
+
 import pytest
 import numpy as np
 import logging
 
 from brian2 import *
 from brian2.tests.utils import assert_allclose
-from brian2.utils.logger import catch_logs
+from brian2.utils.logger import catch_logs as _catch_logs
 from brian2.devices.device import set_device
 from brian2.tests.test_synapses import permutation_analysis_good_examples
 from brian2.utils.stringtools import get_identifiers, deindent
@@ -12,6 +14,9 @@
 import brian2cuda
 from brian2cuda.cuda_generator import CUDACodeGenerator
 
+# Only catch our own log messages
+catch_logs = functools.partial(_catch_logs, only_from=["brian2cuda"])
+
 
 @pytest.mark.parametrize(
     'func,zero_point,offset',
@@ -198,24 +203,24 @@ def test_default_function_convertion_warnings():
 
     assert len(logs1) == 1, len(logs1)
     assert logs1[0][0] == 'WARNING'
-    assert logs1[0][1] == 'brian2.codegen.generators.cuda_generator'
+    assert logs1[0][1] == 'brian2cuda.cuda_generator'
     assert len(logs2) == 1, len(logs2)
     assert logs2[0][0] == 'WARNING'
-    assert logs2[0][1] == 'brian2.codegen.generators.cuda_generator'
+    assert logs2[0][1] == 'brian2cuda.cuda_generator'
     assert len(logs3) == 0, len(logs3)
     assert len(logs4) == 0, len(logs4)
     assert len(logs5) == 1, len(logs5)
     assert logs5[0][0] == 'WARNING'
-    assert logs5[0][1] == 'brian2.codegen.generators.cuda_generator'
+    assert logs5[0][1] == 'brian2cuda.cuda_generator'
    assert len(logs6) == 1, len(logs6)
     assert logs6[0][0] == 'WARNING'
-    assert logs6[0][1] == 'brian2.codegen.generators.cuda_generator'
+    assert logs6[0][1] == 'brian2cuda.cuda_generator'
     assert len(logs7) == 1, len(logs7)
     assert logs7[0][0] == 'WARNING'
-    assert logs7[0][1] == 'brian2.codegen.generators.cuda_generator'
+    assert logs7[0][1] == 'brian2cuda.cuda_generator'
     assert len(logs8) == 1, len(logs8)
     assert logs8[0][0] == 'WARNING'
-    assert logs8[0][1] == 'brian2.codegen.generators.cuda_generator'
+    assert logs8[0][1] == 'brian2cuda.cuda_generator'
 
 
@@ -292,7 +297,7 @@ def test_atomics_parallelisation(code):
         device.reinit()
         device.activate()
         cuda_generator_messages = [l for l in caught_logs
-                                   if l[1]=='brian2.codegen.generators.cuda_generator']
+                                   if l[1]=='brian2cuda.cuda_generator']
         if should_be_able_to_use_ufunc_at:
             assert len(cuda_generator_messages) == 0, cuda_generator_messages
         else:
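Note: the assertion updates above follow from switching every module to `get_logger(__name__)`. Log records are now emitted under the `brian2cuda.*` hierarchy instead of borrowed `brian2.*` names, which is also what lets the `functools.partial` pre-binding of `only_from=["brian2cuda"]` filter correctly. A sketch of the naming effect with the standard library (brian2's `get_logger`/`catch_logs` behave analogously; `filter_records` is a hypothetical stand-in):

    import functools
    import logging

    # Inside brian2cuda/cuda_generator.py, __name__ == 'brian2cuda.cuda_generator',
    # so records carry that name rather than 'brian2.codegen.generators.cuda_generator'
    logger = logging.getLogger("brian2cuda.cuda_generator")

    def filter_records(records, only_from):
        # Keep records whose logger name starts with one of the prefixes,
        # which is roughly what catch_logs(..., only_from=...) does
        return [r for r in records if r.name.startswith(tuple(only_from))]

    catch_logs = functools.partial(filter_records, only_from=["brian2cuda"])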
diff --git a/brian2cuda/tests/test_gpu_detection.py b/brian2cuda/tests/test_gpu_detection.py
index be81f5ce..2434b485 100644
--- a/brian2cuda/tests/test_gpu_detection.py
+++ b/brian2cuda/tests/test_gpu_detection.py
@@ -1,3 +1,4 @@
+import functools
 import os
 import logging
 from io import StringIO
@@ -6,7 +7,7 @@
 from numpy.testing import assert_equal
 
 from brian2 import prefs, ms, run, set_device, device
-from brian2.utils.logger import catch_logs
+from brian2.utils.logger import catch_logs as _catch_logs
 from brian2.core.preferences import PreferenceError
 from brian2cuda.utils.gputools import (
     reset_cuda_installation,
@@ -17,6 +18,9 @@
     restore_gpu_selection,
 )
 
+# Only catch our own log messages
+catch_logs = functools.partial(_catch_logs, only_from=["brian2cuda"])
+
 
 ### Pytest fixtures ###
 # Detecting the CUDA installation and GPU to use is happening only once and sets global
@@ -108,7 +112,7 @@ def test_wrong_cuda_path_warning(reset_cuda_detection, use_default_prefs, monkey
     assert len(logs) == 1, logs
     log = logs[0]
     assert log[0] == "WARNING"
-    assert log[1] == "brian2.devices.cuda_standalone"
+    assert log[1] == "brian2cuda.utils.gputools"
     assert log[2].startswith("Couldn't find `nvcc` binary ")
 
 
@@ -149,7 +153,7 @@ def test_warning_compute_capability_set_twice(reset_gpu_detection, use_default_p
     assert len(logs) == 1, logs
     log = logs[0]
     assert log[0] == "WARNING"
-    assert log[1] == "brian2.devices.cuda_standalone"
+    assert log[1] == "brian2cuda.device"
     assert log[2].startswith("GPU architecture for compilation was specified via ")
 
 
diff --git a/brian2cuda/tests/test_neurongroup.py b/brian2cuda/tests/test_neurongroup.py
index 7a4ee36d..24624437 100644
--- a/brian2cuda/tests/test_neurongroup.py
+++ b/brian2cuda/tests/test_neurongroup.py
@@ -1,4 +1,3 @@
-import os
 import numpy as np
 import pytest
 from numpy.testing import assert_equal
@@ -7,13 +6,13 @@
 from brian2.core.magic import run
 from brian2.groups.neurongroup import NeuronGroup
 from brian2.synapses import Synapses
-from brian2.tests import make_argv
 from brian2.tests.utils import assert_allclose
 from brian2.utils.logger import catch_logs
 from brian2.units import second
 
 # Adapted from brian2/tests/test_neurongroup.py::test_semantics_floor_division
 # (brian2 test asserts for 0 warnings, brian2cuda warns for int to float64 conversion)
+# Can be removed with the next version that includes brian-team/brian2#1519
 @pytest.mark.standalone_compatible
 def test_semantics_floor_division():
     # See Brian2 github issues #815 and #661
diff --git a/brian2cuda/tests/test_random_number_generation.py b/brian2cuda/tests/test_random_number_generation.py
index 98342a5c..62908e8b 100644
--- a/brian2cuda/tests/test_random_number_generation.py
+++ b/brian2cuda/tests/test_random_number_generation.py
@@ -122,8 +122,8 @@ def test_rng_occurrence_counting():
     S_randn = Synapses(G_randn, G_randn, on_pre='''x += randn()''', name='S_randn')
     S_randn.connect()
 
-    G_poisson = NeuronGroup(10, '''dx/dt = poisson(1)/ms : 1''', threshold='True', name='G_poisson')
-    S_poisson = Synapses(G_poisson, G_poisson, on_pre='''x += poisson(1)''', name='S_poisson')
+    G_poisson = NeuronGroup(10, '''dx/dt = poisson(2.0)/ms : 1''', threshold='True', name='G_poisson')
+    S_poisson = Synapses(G_poisson, G_poisson, on_pre='''x += poisson(2.0)''', name='S_poisson')
     S_poisson.connect()
 
     run(0*ms)
@@ -149,9 +149,9 @@ def test_rng_occurrence_counting():
 
             if rng_type == 'poisson_0':
                 co_lamda = code_object.poisson_lamdas['poisson_0']
-                assert co_lamda == 1.0, co_lamda
+                assert co_lamda == 2.0, co_lamda
                 d_lamda = device.all_poisson_lamdas[code_object.name]['poisson_0']
-                assert d_lamda == 1.0, f"{d_lamda} {code_object.name}"
+                assert d_lamda == 2.0, f"{d_lamda} {code_object.name}"
                 assert not code_object.needs_curand_states
 
diff --git a/brian2cuda/utils/gputools.py b/brian2cuda/utils/gputools.py
index 93ba2bc7..522da64f 100644
--- a/brian2cuda/utils/gputools.py
+++ b/brian2cuda/utils/gputools.py
@@ -13,7 +13,7 @@
 from brian2.utils.logger import get_logger
 from brian2cuda.utils.logger import report_issue_message
 
-logger = get_logger("brian2.devices.cuda_standalone")
+logger = get_logger(__name__)
 
 
 # To list all GPUs: nvidia-smi -L
diff --git a/frozen_repos/brian2 b/frozen_repos/brian2
index 04c91b82..ec5b8ce0 160000
--- a/frozen_repos/brian2
+++ b/frozen_repos/brian2
@@ -1 +1 @@
-Subproject commit 04c91b824ef6d8c9872e80f8d14154ee815be058
+Subproject commit ec5b8ce019d49063ff9a3b821b9b9d400850a331
diff --git a/frozen_repos/brian2.diff b/frozen_repos/brian2.diff
index e7d5e428..c2a02515 100644
--- a/frozen_repos/brian2.diff
+++ b/frozen_repos/brian2.diff
@@ -1,8 +1,8 @@
 diff --git a/brian2/devices/cpp_standalone/device.py b/brian2/devices/cpp_standalone/device.py
-index b4f34065..ffdac84a 100644
+index 7cb100b9..10f878c6 100644
 --- a/brian2/devices/cpp_standalone/device.py
 +++ b/brian2/devices/cpp_standalone/device.py
-@@ -1240,6 +1240,7 @@ def run(self, directory, with_output, run_args):
+@@ -1341,6 +1341,7 @@ def run(
          run_time, completed_fraction = last_run_info.split()
          self._last_run_time = float(run_time)
          self._last_run_completed_fraction = float(completed_fraction)
@@ -23,7 +23,7 @@ index 9ad11c0b..7442aaf7 100644
  from .base import *
  from . import input, monitors, neurongroup, speed, synapses
 diff --git a/brian2/tests/features/base.py b/brian2/tests/features/base.py
-index d8a83041..3ef9c96d 100644
+index 1f19da86..ee692cde 100644
 --- a/brian2/tests/features/base.py
 +++ b/brian2/tests/features/base.py
 @@ -11,6 +11,7 @@
@@ -261,7 +261,7 @@ index d8a83041..3ef9c96d 100644
 @@ -568,17 +672,35 @@ def run_speed_tests(
-             ] = overheadstime
+             )
          if verbose:
              print("]", end=" ")
 +            sys.stdout.flush()
diff --git a/pyproject.toml b/pyproject.toml
index 49fead0e..ba50dd87 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -7,7 +7,7 @@ authors = [
 ]
 requires-python = '>=3.9'
 dependencies = [
-    'brian2==2.5.4',
+    'brian2==2.6.0',
 ]
 dynamic = ["version", "readme"]
 description = 'A Brian2 extension to simulate spiking neural networks on GPUs'