Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Update for Brian 2.6 #314

Merged
merged 10 commits into from
Mar 26, 2024
Merged
5 changes: 5 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
brian2cuda/_version.py

# Compiled Python files
*.py[cod]

Expand Down Expand Up @@ -30,6 +32,9 @@ nosetests.xml
# pycharm project files
.idea

# VisualStudio Code
.vscode/

# egg-info files
/*.egg-info

Expand Down
2 changes: 1 addition & 1 deletion brian2cuda/cuda_generator.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@
__all__ = ['CUDACodeGenerator', 'CUDAAtomicsCodeGenerator', 'c_data_type']


logger = get_logger('brian2.codegen.generators.cuda_generator')
logger = get_logger(__name__)


class ParallelisationError(Exception):
Expand Down
2 changes: 1 addition & 1 deletion brian2cuda/cuda_prefs.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
from brian2.utils.logger import get_logger


logger = get_logger('brian2.devices.cuda_standalone.cuda_prefs')
logger = get_logger(__name__)

def validate_bundle_size_expression(string):
known_vars = ['mean', 'std', 'max', 'min']
Expand Down
65 changes: 48 additions & 17 deletions brian2cuda/device.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@

__all__ = []

logger = get_logger('brian2.devices.cuda_standalone')
logger = get_logger(__name__)


class CUDAWriter(CPPWriter):
Expand Down Expand Up @@ -433,7 +433,15 @@ def check_openmp_compatible(self, nb_threads):
if nb_threads > 0:
raise NotImplementedError("Using OpenMP in a CUDA standalone project is not supported")

def generate_objects_source(self, writer, arange_arrays, synapses, static_array_specs, networks):
def generate_objects_source(
self,
writer,
arange_arrays,
synapses,
static_array_specs,
networks,
timed_arrays,
):
sm_multiplier = prefs.devices.cuda_standalone.SM_multiplier
num_parallel_blocks = prefs.devices.cuda_standalone.parallel_blocks
curand_generator_type = prefs.devices.cuda_standalone.random_number_generator_type
Expand All @@ -443,7 +451,7 @@ def generate_objects_source(self, writer, arange_arrays, synapses, static_array_
for var, varname in self.arrays.items():
if var.name.endswith('space'): # get all eventspace variables
self.eventspace_arrays[var] = varname
#if hasattr(var, 'owner') and isinstance(v.owner, Clock):
# if hasattr(var, 'owner') and isinstance(v.owner, Clock):
if isinstance(var.owner, SpikeGeneratorGroup):
self.spikegenerator_eventspaces.append(varname)
for var in self.eventspace_arrays.keys():
Expand Down Expand Up @@ -474,6 +482,7 @@ def generate_objects_source(self, writer, arange_arrays, synapses, static_array_
networks=networks,
code_objects=self.code_objects.values(),
get_array_filename=self.get_array_filename,
get_array_name=self.get_array_name,
all_codeobj_with_host_rng=self.codeobjects_with_rng["host_api"]["all_runs"],
sm_multiplier=sm_multiplier,
num_parallel_blocks=num_parallel_blocks,
Expand All @@ -487,6 +496,7 @@ def generate_objects_source(self, writer, arange_arrays, synapses, static_array_
profile_statemonitor_copy_to_host=profile_statemonitor_copy_to_host,
profile_statemonitor_vars=profile_statemonitor_vars,
subgroups_with_spikemonitor=sorted(subgroups_with_spikemonitor),
timed_arrays=timed_arrays,
variables_on_host_only=self.variables_on_host_only)
# Reinsert deleted entries, in case we use self.arrays later? maybe unnecessary...
self.arrays.update(self.eventspace_arrays)
Expand Down Expand Up @@ -1227,7 +1237,7 @@ def generate_makefile(self, writer, cpp_compiler, cpp_compiler_flags, cpp_linker
)
writer.write('makefile', makefile_tmp)

def build(self, directory='output',
def build(self, directory='output', results_directory="results",
compile=True, run=True, debug=False, clean=False,
with_output=True, disable_asserts=False,
additional_source_files=None,
Expand Down Expand Up @@ -1307,6 +1317,15 @@ def build(self, directory='output',
directory = tempfile.mkdtemp(prefix='brian_standalone_')
self.project_dir = directory
ensure_directory(directory)
if os.path.isabs(results_directory):
raise TypeError(
"The 'results_directory' argument needs to be a relative path but was "
f"'{results_directory}'."
)
# Translate path to absolute path which ends with /
self.results_dir = os.path.join(
os.path.abspath(os.path.join(directory, results_directory)), ""
)

# Determine compiler flags and directories
cpp_compiler, cpp_default_extra_compile_args = get_compiler_and_args()
Expand Down Expand Up @@ -1447,9 +1466,14 @@ def build(self, directory='output',

self.generate_codeobj_source(self.writer)

self.generate_objects_source(self.writer, self.arange_arrays,
self.synapses, self.static_array_specs,
self.networks)
self.generate_objects_source(
self.writer,
self.arange_arrays,
self.synapses,
self.static_array_specs,
self.networks,
self.timed_arrays,
)
self.generate_network_source(self.writer)
self.generate_synapses_classes_source(self.writer)
self.generate_run_source(self.writer)
Expand Down Expand Up @@ -1483,13 +1507,18 @@ def build(self, directory='output',
if compile:
self.compile_source(directory, cpp_compiler, debug, clean)
if run:
self.run(directory, with_output, run_args)
self.run(
directory=directory,
results_directory=results_directory,
with_output=with_output,
run_args=run_args,
)

def network_run(self, net, duration, report=None, report_period=10*second,
namespace=None, profile=False, level=0, **kwds):
###################################################
### This part is copied from CPPStandaoneDevice ###
###################################################
####################################################
### This part is copied from CPPStandaloneDevice ###
####################################################
self.networks.add(net)
if kwds:
logger.warn(('Unsupported keyword argument(s) provided for run: '
Expand Down Expand Up @@ -1671,12 +1700,15 @@ def network_run(self, net, duration, report=None, report_period=10*second,
if clock not in all_clocks:
run_lines.append(f'{net.name}.add(&{clock.name}, NULL);')

run_lines.extend(self.code_lines['before_network_run'])
# run everything that is run on a clock
run_lines.extend(self.code_lines["before_network_run"])
if not self.run_args_applied:
run_lines.append("set_from_command_line(args);")
self.run_args_applied = True
run_lines.append(
f'{net.name}.run({float(duration)!r}, {report_call}, {float(report_period)!r});'
f"{net.name}.run({float(duration)!r}, {report_call},"
f" {float(report_period)!r});"
)
run_lines.extend(self.code_lines['after_network_run'])
run_lines.extend(self.code_lines["after_network_run"])
# for multiple runs, the random number buffer needs to be reset
run_lines.append('random_number_buffer.run_finished();')
# nvprof stuff
Expand Down Expand Up @@ -1758,7 +1790,6 @@ def network_restore(self, net, *args, **kwds):
'supported in CUDA standalone'))



def prepare_codeobj_code_for_rng(codeobj):
'''
Prepare a CodeObject for random number generation (RNG).
Expand Down Expand Up @@ -1964,7 +1995,7 @@ def prepare_codeobj_code_for_rng(codeobj):
# If the codeobjec does not need curand states for poisson, check if it needs
# them for binomial calls
if not codeobj.needs_curand_states:
match = re.search('_binomial\w*\(const int vectorisation_idx\)', codeobj.code.cu_file)
match = re.search(r'_binomial\w*\(const int vectorisation_idx\)', codeobj.code.cu_file)
if match is not None:
codeobj.needs_curand_states = True

Expand Down
20 changes: 20 additions & 0 deletions brian2cuda/templates/main.cu
Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,28 @@

{{report_func|autoindent}}

// Apply "name=value" command-line overrides to named Brian variables.
// Each argument is split at its first '=' and forwarded to
// brian::set_variable_by_name(name, value). Malformed arguments
// (no '=') are reported and skipped instead of being passed through
// with a garbage split (substr past npos previously made name and
// value both equal to the whole argument).
void set_from_command_line(const std::vector<std::string> &args)
{
    for (const auto& arg : args) {
        // Split into two parts at the first '='
        const size_t equal_sign = arg.find('=');
        if (equal_sign == std::string::npos) {
            std::cerr << "Invalid command line argument (expected name=value): '"
                      << arg << "'" << std::endl;
            continue;
        }
        const auto name = arg.substr(0, equal_sign);
        const auto value = arg.substr(equal_sign + 1);
        brian::set_variable_by_name(name, value);
    }
}

int main(int argc, char **argv)
{
std::vector<std::string> args(argv + 1, argv + argc);
if (args.size() >=2 && args[0] == "--results_dir")
{
brian::results_dir = args[1];
#ifdef DEBUG
std::cout << "Setting results dir to '" << brian::results_dir << "'" << std::endl;
#endif
args.erase(args.begin(), args.begin()+2);
}
{{'\n'.join(code_lines['before_start'])|autoindent}}

// seed variable set in Python through brian2.seed() calls can use this
Expand Down
Loading
Loading