diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 96d68513a9..a37deab79e 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -72,7 +72,6 @@ jobs:
- name: run framework tests
env:
- HBP_AUTH_TOKEN: ${{ secrets.HBP_AUTH_TOKEN }} # token or (secret+id) are needed for siibra tests
KEYCLOAK_CLIENT_ID: ${{ secrets.KEYCLOAK_CLIENT_ID }} # KEYCLOAK_CLIENT_ID & SECRET have priority in tests
KEYCLOAK_CLIENT_SECRET: ${{ secrets.KEYCLOAK_CLIENT_SECRET }}
run: pytest -v tvb_framework --cov --cov-report=xml --ignore=tvb_framework/tvb/interfaces/rest/client/tests/rest_test.py && mv coverage.xml coverage-framework.xml
diff --git a/.github/workflows/notebooks.yml b/.github/workflows/notebooks.yml
index 6483701141..f22a435684 100644
--- a/.github/workflows/notebooks.yml
+++ b/.github/workflows/notebooks.yml
@@ -58,7 +58,5 @@ jobs:
python setup.py develop
- name: run notebooks
- env:
- HBP_AUTH_TOKEN: ${{ secrets.HBP_AUTH_TOKEN }} # token or (secret+id) are needed for siibra tests
run: |
- python ./tvb_build/notebook_runner.py ./tvb_documentation/demos siibra
\ No newline at end of file
+ python ./tvb_build/notebook_runner.py ./tvb_documentation/demos
\ No newline at end of file
diff --git a/tvb_build/docker/requirements_group b/tvb_build/docker/requirements_group
index 8d2a113b0a..eafa0e6b93 100644
--- a/tvb_build/docker/requirements_group
+++ b/tvb_build/docker/requirements_group
@@ -49,7 +49,7 @@ lxml
pylems
docutils
Pillow
-siibra==0.4a35
+siibra==1.0a5
bctpy
Deprecated
kubernetes
diff --git a/tvb_build/notebook_runner.py b/tvb_build/notebook_runner.py
index a1e899a2d1..bbefaa7c4a 100644
--- a/tvb_build/notebook_runner.py
+++ b/tvb_build/notebook_runner.py
@@ -45,13 +45,16 @@ def execute_notebook(notebook):
if __name__ == '__main__':
+ if len(sys.argv) < 2:
+ raise AttributeError("please provide the input path")
+ in_path = sys.argv[1]
+
if len(sys.argv) >= 2:
- in_path = sys.argv[1]
sub_folders = sys.argv[2:]
# compute relative paths
sub_folders = [os.path.join(in_path, folder) for folder in sub_folders]
else:
- raise AttributeError("please insert the input path")
+ sub_folders = []
skipped_notebooks = [
'exploring_time_series_interactive.ipynb', # run separately because of other notebook dependency
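Usage sketch for the new argument handling (the sub-folder names below are hypothetical): the input
path is now mandatory, while sub-folders remain optional:

    python ./tvb_build/notebook_runner.py ./tvb_documentation/demos
    python ./tvb_build/notebook_runner.py ./tvb_documentation/demos demo_subfolder_a demo_subfolder_b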
diff --git a/tvb_build/tvb_build/third_party_licenses/packages_accepted.xml b/tvb_build/tvb_build/third_party_licenses/packages_accepted.xml
index 08cc3a9366..cc767a30ee 100644
--- a/tvb_build/tvb_build/third_party_licenses/packages_accepted.xml
+++ b/tvb_build/tvb_build/third_party_licenses/packages_accepted.xml
@@ -2846,11 +2846,11 @@
-
+
-
+
diff --git a/tvb_documentation/demos/siibra/06-SIIBRA-TVB_Updated.ipynb b/tvb_documentation/demos/siibra/06-SIIBRA-TVB_Updated.ipynb
deleted file mode 100644
index 5d0acf8886..0000000000
--- a/tvb_documentation/demos/siibra/06-SIIBRA-TVB_Updated.ipynb
+++ /dev/null
@@ -1,518 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "id": "d6f5d0d6",
- "metadata": {
- "tags": []
- },
- "source": [
- "### Prototype of integrating TVB with SIIBRA - Download a structural connectivity from SIIBRA and setup a TVB simulation with it\n",
- "#### (This is an adaptation of https://github.com/dickscheid/siibra-tutorials/blob/main/06-SIIBRA-TVB.ipynb, which used an older version of siibra-python)"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "f2f38352-0923-404a-bd99-551c17aa2cad",
- "metadata": {
- "tags": []
- },
- "source": [
- "### Imports and setup"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "c4aade82-4a7a-40d4-a912-d196bb9376c2",
- "metadata": {},
- "outputs": [],
- "source": [
- "%matplotlib widget"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "54fc39aa",
- "metadata": {},
- "outputs": [],
- "source": [
- "import os\n",
- "import numpy as np\n",
- "import siibra\n",
- "from tvb.simulator.lab import *"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "5d493b1c-94cd-4bfa-876b-7d84fc3f7c94",
- "metadata": {},
- "source": [
- "### Make sure an EBRAINS token exists"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "43a1256c-906f-42c0-97d6-76bc7d546f56",
- "metadata": {},
- "outputs": [],
- "source": [
- "if not 'HBP_AUTH_TOKEN' in os.environ:\n",
- " print(\"Missing auth token for siibra!!\")\n",
- " \n",
- "#os.environ['HBP_AUTH_TOKEN'] = ''\n",
- "# alterantively, use siibra service for authentication into EBRAINS\n",
- "# siibra.fetch_ebrains_token()"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "234112e8-8021-475e-bbd5-2cddc5efbaf2",
- "metadata": {
- "tags": []
- },
- "source": [
- "### Get connectivity matrices"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "562d3d1a-546a-4dc2-89df-8b7a1c6a8738",
- "metadata": {},
- "source": [
- "#### Connectivity weights"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "5731d2d4",
- "metadata": {},
- "outputs": [],
- "source": [
- "# get desired brain parcellation\n",
- "atlas = siibra.atlases[\"human\"]\n",
- "jubrain = atlas.get_parcellation(\"julich 2.9\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "9c658c6c-4ac7-439c-b293-15d8fc866a66",
- "metadata": {},
- "outputs": [],
- "source": [
- "# get the Streamline Counts (conn. weights) features\n",
- "# this gives a list of Streamline Counts objects corresponding to a different cohort; each Streamline Count obj. has multiple connectivities\n",
- "features = siibra.features.get(jubrain, siibra.features.connectivity.StreamlineCounts)\n",
- "for f in features:\n",
- " print(f.cohort)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "ad608d87-2b8f-4ef4-8fe1-113d396dba62",
- "metadata": {},
- "outputs": [],
- "source": [
- "# select one of the cohorts and get all the connectivities for all the subjects\n",
- "conn_weights = features[0]\n",
- "print(f'There are connectivity weights available for {len(conn_weights.subjects)} subjects')"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "7f69a32e-b9c0-473a-8423-a162abdfbecb",
- "metadata": {},
- "outputs": [],
- "source": [
- "# get the connectivity matrix for one of the subjects\n",
- "subject = conn_weights.subjects[0] # this gives us a string containing the subject id\n",
- "weights_matrix = conn_weights.get_matrix(subject) # matrix stored as pandas DataFrame\n",
- "weights_matrix"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "4a7f6960-b774-4f38-9925-956ee0dd9487",
- "metadata": {},
- "source": [
- "#### Connectivity tracts"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "d6389194-13ca-47d8-a838-fe399e22af82",
- "metadata": {},
- "outputs": [],
- "source": [
- "# get the Streamline Lengths (conn. tracts) features\n",
- "# this gives a list of Streamline Lengths object corresponding to a different cohort; each Streamline Lengths obj. has multiple connectivities\n",
- "features_lengths = siibra.features.get(jubrain, siibra.features.connectivity.StreamlineLengths)\n",
- "for f in features_lengths:\n",
- " print(f.cohort)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "22eca258-bd0e-490e-8307-ed64cbacc56c",
- "metadata": {},
- "outputs": [],
- "source": [
- "# select one of the cohorts and get all the connectivities for all the subjects\n",
- "conn_tracts = features_lengths[0]\n",
- "conn_tracts.name\n",
- "print(f'There are connectivity tracts available for {len(conn_tracts.subjects)} subjects')"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "953e494f-148b-498b-ae43-877d96771f69",
- "metadata": {},
- "outputs": [],
- "source": [
- "# get the connectivity matrix for one of the subjects\n",
- "subject = conn_tracts.subjects[0] # this gives us a string containing the subject id\n",
- "tracts_matrix = conn_tracts.get_matrix(subject) # matrix stored as pandas DataFrame\n",
- "tracts_matrix[10:20]"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "12819bbb-b345-4ceb-a177-5d2c3e5419e0",
- "metadata": {},
- "outputs": [],
- "source": [
- "# check that the weights and tracts have the same format\n",
- "assert len(weights_matrix) == len(tracts_matrix)\n",
- "assert weights_matrix.columns.to_list() == tracts_matrix.columns.to_list()\n",
- "assert (weights_matrix.index == tracts_matrix.index).all()"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "082f19b5-9a3d-460b-bf6d-2fb08dd292c5",
- "metadata": {
- "tags": []
- },
- "source": [
- "### Get region names\n",
- "##### (In siibra the indices of weights/tracts matrix are Region objects*)\n",
- "\\* sometimes they are tuples, where first value is name of parent of that region and second value is the actual region"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "3410f623-3df4-4113-9192-1308dc34a670",
- "metadata": {
- "scrolled": true,
- "tags": []
- },
- "outputs": [],
- "source": [
- "# get list of region objects\n",
- "regions = weights_matrix.index.values\n",
- "# because sometimes we have tuples instead of regions, correct the list to have only regions\n",
- "regions = [r[1] if type(r)==tuple else r for r in regions]\n",
- "regions"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "9d30e4f9-3043-4e3e-b3a4-e8cd7bc180fb",
- "metadata": {},
- "outputs": [],
- "source": [
- "# get region names and the corresponding hemispheres\n",
- "reg_names = []\n",
- "hemi = []\n",
- "for r in regions:\n",
- " name = r.name\n",
- " reg_names.append(name)\n",
- " \n",
- " if 'right' in name:\n",
- " hemi.append(1)\n",
- " # there is a bug on the else branch: there are regions which refer to both the right and left hemishperes;\n",
- " # right now they are put in the left hemisphere, but this is wrong! and should be corrected in some way\n",
- " else:\n",
- " hemi.append(0)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "1ccd3abb-e08d-417d-8700-64cdd89643b6",
- "metadata": {},
- "outputs": [],
- "source": [
- "reg_names[:5]"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "9507de90-43f6-417c-af33-637a475ae75f",
- "metadata": {},
- "outputs": [],
- "source": [
- "# check the correctness of hemi array\n",
- "hemi[:5]"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "66d89fd2-b302-4611-91b6-a95c6993e521",
- "metadata": {
- "tags": []
- },
- "outputs": [],
- "source": [
- "# save regions related to both hemispheres for future reference?\n",
- "both_hemi_regions = []\n",
- "for r in regions:\n",
- " name = r.name\n",
- " if 'left' not in name and 'right' not in name:\n",
- " both_hemi_regions.append(r)\n",
- "both_hemi_regions"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "9714cb86-a5eb-4544-b5cb-c85a3c365c70",
- "metadata": {},
- "source": [
- "### Get region positions"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "8c4260a3-c708-4936-a402-5b3ba63a63ea",
- "metadata": {},
- "outputs": [],
- "source": [
- "# first we need a space in which the positions are computed\n",
- "space = atlas.spaces.MNI_152_ICBM_2009C_NONLINEAR_ASYMMETRIC # commonly used space in other examples"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "1a389ffe-c155-4254-ba30-7177349748ca",
- "metadata": {},
- "outputs": [],
- "source": [
- "r1 = regions[0]"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "3df69268-9427-4120-92ba-d44e57289c4d",
- "metadata": {},
- "outputs": [],
- "source": [
- "tuple(r1.spatial_props(space)['components'][0]['centroid'])"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "746a3cff-4bcd-4e40-a639-cb3773441bf2",
- "metadata": {
- "scrolled": true,
- "tags": []
- },
- "outputs": [],
- "source": [
- "positions = []\n",
- "for r in regions:\n",
- " spatial_props = r.spatial_props(space) # gives a dict of spatial props\n",
- " # get centroids list\n",
- " centroids = spatial_props['components']\n",
- " # get siibra.Point object from centroid list; some regions have multiple centroids, but only the first one is selected\n",
- " centroids = centroids[0]['centroid']\n",
- " # tuple() gives the coordinates of a centroid\n",
- " positions.append(tuple(centroids))\n",
- "positions"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "796fa04f-029d-423a-aaa9-e8c7f616630e",
- "metadata": {},
- "source": [
- "### Create TVB Connectivity with data obtained using siibra"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "953e0bcb-e8f2-4d7e-9c31-aca50c484cf7",
- "metadata": {},
- "outputs": [],
- "source": [
- "# Moving info from Siibra into TVB concepts\n",
- "conn = connectivity.Connectivity()\n",
- "conn.weights = weights_matrix.to_numpy()\n",
- "conn.tract_lengths = tracts_matrix.to_numpy()\n",
- "conn.region_labels = np.array(reg_names)\n",
- "conn.hemispheres = np.array(hemi, dtype=np.bool_)\n",
- "conn.centres = np.array(positions)\n",
- "\n",
- "conn.configure()\n",
- "conn"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "091bf48d-934d-4d16-b74c-b169579e93b2",
- "metadata": {},
- "outputs": [],
- "source": [
- "plot_connectivity(connectivity=conn)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "d105f9c4-1229-4e56-9baa-eb3fc97eee38",
- "metadata": {},
- "outputs": [],
- "source": [
- "# Save connectivity in TVB accepted format\n",
- "centres_content = np.concatenate((np.array(reg_names)[:, None], positions), axis=1)\n",
- "\n",
- "\n",
- "root_folder = 'julich_conn'\n",
- "out_dir = os.path.join(root_folder, subject)\n",
- "if not os.path.exists(out_dir):\n",
- " os.makedirs(out_dir)\n",
- "\n",
- "np.savetxt(os.path.join(out_dir, \"centers.txt\"), centres_content, \"%s\")\n",
- "np.savetxt(os.path.join(out_dir, \"hemispheres.txt\"), hemi, \"%s\")\n",
- "np.savetxt(os.path.join(out_dir, \"weights.txt\"), weights_matrix.to_numpy(), \"%f\")\n",
- "np.savetxt(os.path.join(out_dir, \"tract_lenghts.txt\"), tracts_matrix.to_numpy(), \"%f\")"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "2d8fcbbf-f537-4763-889a-89adba49b2f8",
- "metadata": {},
- "source": [
- "### Create simulation using the obtained connectivity"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "dabaece2-a288-4f3c-95d8-d790756715f5",
- "metadata": {},
- "outputs": [],
- "source": [
- "sim = simulator.Simulator()\n",
- "sim.connectivity = conn\n",
- "sim.simulation_length = 1024\n",
- "sim.configure()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "86bdab92-3ebb-4868-8509-4ef80f674a72",
- "metadata": {},
- "outputs": [],
- "source": [
- "(time, data), = sim.run()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "1f33005b-fe98-4467-b80e-745f902baa52",
- "metadata": {},
- "outputs": [],
- "source": [
- "time.size"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "d408ee72-b5be-4d3f-847e-c0a9980c993c",
- "metadata": {},
- "outputs": [],
- "source": [
- "data.shape"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "f99fa008-749d-42ec-9769-a5d57a382d40",
- "metadata": {},
- "outputs": [],
- "source": [
- "tsr = time_series.TimeSeriesRegion(\n",
- " data=data,\n",
- " connectivity=sim.connectivity,\n",
- " sample_period=sim.monitors[0].period)\n",
- "tsr.configure()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "09cfecb0-34f1-4351-945f-bdb6663815e7",
- "metadata": {},
- "outputs": [],
- "source": [
- "import tvb.simulator.plot.timeseries_interactive as ts_int\n",
- "tsi = ts_int.TimeSeriesInteractive(time_series=tsr)\n",
- "tsi.configure()\n",
- "tsi.show()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "c1c0f6dc",
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.10.4"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
-}
diff --git a/tvb_documentation/demos/siibra/siibra_tvb_api.ipynb b/tvb_documentation/demos/siibra_tvb_api.ipynb
similarity index 86%
rename from tvb_documentation/demos/siibra/siibra_tvb_api.ipynb
rename to tvb_documentation/demos/siibra_tvb_api.ipynb
index 75d5e1d1c1..78bb2d6378 100644
--- a/tvb_documentation/demos/siibra/siibra_tvb_api.ipynb
+++ b/tvb_documentation/demos/siibra_tvb_api.ipynb
@@ -6,32 +6,7 @@
"metadata": {},
"source": [
"## API integrating the EBRAINS KG with TVB using `siibra-python`.\n",
- "### Focuses on retrieving and creating TVB Structural and Functional Connectivities\n",
- "##### Note: For the API to work, an EBRAINS authentication token needs to be set or the API should be used in an EBRAINS environment."
- ]
- },
- {
- "cell_type": "markdown",
- "id": "0f7a8ef2-b29f-4b1c-b6c7-1a90e97123a6",
- "metadata": {
- "tags": []
- },
- "source": [
- "### Make sure an EBRAINS token exists"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "4c73c307-fab9-433f-98ad-ea279b020c4d",
- "metadata": {},
- "outputs": [],
- "source": [
- "import os\n",
- "# Add an EBRAINS tokens if not done already\n",
- "if not 'HBP_AUTH_TOKEN' in os.environ:\n",
- " print(\"Missing auth token for siibra!!\")\n",
- "#os.environ['HBP_AUTH_TOKEN'] = ''"
+ "### Focuses on retrieving and creating TVB Structural and Functional Connectivities"
]
},
{
@@ -41,9 +16,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# alterantively, use siibra service for authentication into EBRAINS\n",
- "import siibra\n",
- "#siibra.fetch_ebrains_token()"
+ "import siibra"
]
},
{
@@ -67,7 +40,7 @@
"id": "523bcf72-6f28-44bc-a50b-b17c08d92ac3",
"metadata": {},
"source": [
- "##### The results of this method are 2 **dictionaries**, containing either structural or functional connectivities. Each dictionary has as:\n",
+ "##### The results of this method are 2 **dictionaries**, containing structural and functional connectivities respectively. Each dictionary has as:\n",
"##### **- key**: the subject id\n",
"##### **- value**: the Structural/Functional TVB Connectivity for the corresponding subject"
]
@@ -76,11 +49,14 @@
"cell_type": "code",
"execution_count": null,
"id": "8b3ae241-a627-4f1e-9d41-df13ef5ac2cf",
- "metadata": {},
+ "metadata": {
+ "scrolled": true,
+ "tags": []
+ },
"outputs": [],
"source": [
"from tvb.adapters.creators.siibra_base import get_connectivities_from_kg\n",
- "struct_conn_dict, func_conn_dict = get_connectivities_from_kg('human', '2.9', 'HCP', '000-001', True)"
+ "struct_conn_dict, func_conn_dict = get_connectivities_from_kg('human', 'julich 3', 'HCP', '000-001', True)"
]
},
{
@@ -187,7 +163,7 @@
"metadata": {},
"outputs": [],
"source": [
- "struct_conn_dict, func_conn_dict = get_connectivities_from_kg('human', '2.9', '1000BRAINS', '0002', False)"
+ "struct_conn_dict, func_conn_dict = get_connectivities_from_kg('human', 'julich 2.9', '1000BRAINS', '0002', False)"
]
},
{
@@ -195,7 +171,8 @@
"id": "13b5eb7a-a678-4555-9cb1-bdce1eb2faec",
"metadata": {},
"source": [
- "##### Now there is one Structural connectivity:"
+ "##### Now there are 2* Structural connectivities for our subject:\n",
+ "##### **in this cohort, some subjects had 2 scanning sessions, resulting in 2 Structural connectivities*"
]
},
{
@@ -244,7 +221,7 @@
"outputs": [],
"source": [
"from tvb.adapters.creators.siibra_base import get_structural_connectivities_from_kg\n",
- "struct_conn_dict = get_structural_connectivities_from_kg('human', '2.9', 'HCP', '002')"
+ "struct_conn_dict = get_structural_connectivities_from_kg('human', '2.9', '1000BRAINS', '0002')"
]
},
{
@@ -285,6 +262,16 @@
"#### For this reason, to extract any FC from siibra, we must also provide a dictionary of corresponding Structural Connectivities."
]
},
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "fe0f4929-e878-4def-ae03-3fd099eb4580",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "struct_conn_dict = get_structural_connectivities_from_kg('human', 'julich 3', 'HCP', '002')"
+ ]
+ },
{
"cell_type": "code",
"execution_count": null,
@@ -318,14 +305,6 @@
"# inspecting a Functional Connectivity\n",
"func_conn_dict['002'][0]"
]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "803156a8-0727-4764-82eb-03f99c48af25",
- "metadata": {},
- "outputs": [],
- "source": []
}
],
"metadata": {
@@ -344,7 +323,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.4"
+ "version": "3.10.0"
}
},
"nbformat": 4,
diff --git a/tvb_framework/requirements.txt b/tvb_framework/requirements.txt
index 757d4f34f8..09b1aece2d 100644
--- a/tvb_framework/requirements.txt
+++ b/tvb_framework/requirements.txt
@@ -39,7 +39,7 @@ pandas
requests
simplejson
six
-siibra==0.4a35
+siibra==1.0a5
bctpy
kubernetes
watchdog
diff --git a/tvb_framework/setup.py b/tvb_framework/setup.py
index 94990eebd9..01b255be8c 100644
--- a/tvb_framework/setup.py
+++ b/tvb_framework/setup.py
@@ -43,7 +43,7 @@
TVB_INSTALL_REQUIREMENTS = ["alembic", "bctpy", "cherrypy", "docutils", "flask==2.3.3", "flask-restx",
"formencode", "gevent", "h5py", "Jinja2", "matplotlib==3.5.3", "nibabel", "numpy", "pandas",
"Pillow", "psutil", "python-keycloak", "requests", "requests-toolbelt>=0.10",
- "scikit-learn", "scipy", "siibra==0.4a35", "simplejson", "six", "sqlalchemy",
+ "scikit-learn", "scipy", "siibra==1.0a5", "simplejson", "six", "sqlalchemy",
"tables==3.7.0", "tvb-data", "tvb-gdist", "tvb-library", "tvb-storage", "werkzeug"]
# Packaging tvb-framework with REST server inside
diff --git a/tvb_framework/tvb/adapters/creators/siibra_base.py b/tvb_framework/tvb/adapters/creators/siibra_base.py
index 71441318fa..144739dbd1 100644
--- a/tvb_framework/tvb/adapters/creators/siibra_base.py
+++ b/tvb_framework/tvb/adapters/creators/siibra_base.py
@@ -38,9 +38,17 @@
LOGGER = get_logger(__name__)
-DEFAULT_ATLAS = 'Multilevel Human Atlas'
-DEFAULT_PARCELLATION = 'Julich-Brain Cytoarchitectonic Maps 2.9'
-DEFAULT_COHORT = 'HCP'
+# Concepts names
+# atlases
+HUMAN_ATLAS = 'Multilevel Human Atlas' # DEFAULT, only this atlas has Struct. Conn.
+
+# parcellations
+JULICH_3_0 = 'Julich-Brain Cytoarchitectonic Atlas (v3.0.3)' # DEFAULT
+JULICH_2_9 = 'Julich-Brain Cytoarchitectonic Atlas (v2.9)'
+parcellations = [JULICH_3_0, JULICH_2_9]
+
+# cohorts
+HCP_COHORT = 'HCP' # DEFAULT
THOUSAND_BRAINS_COHORT = '1000BRAINS'
@@ -54,8 +62,7 @@ class Component2Modality(Enum):
def get_cohorts_for_sc(parcellation_name):
"""
Given a parcellation name, return the name of all the cohorts related to it and containing data about
- Structural Connectivities. We chose to return the options for Struct. Conn., as, for the moment, the same values
- are returned for Functional Conn.
+ Structural Connectivities.
"""
parcellation = siibra.parcellations[parcellation_name]
features = siibra.features.get(parcellation, siibra.features.connectivity.StreamlineCounts)
@@ -76,8 +83,9 @@ def get_atlases_for_parcellation(parcelation):
def get_parcellations_for_atlas(atlas):
- """ Given the name of an atlas, return all the parcellations inside it """
- return list(atlas.parcellations)
+ """ Given an atlas, return a list of all the parcellations inside it, which contain Structural conns. """
+ parcellations_available = [p for p in list(atlas.parcellations) if p.name in parcellations]
+ return parcellations_available
def parse_subject_ids(subject_ids, cohort):
@@ -86,27 +94,20 @@ def parse_subject_ids(subject_ids, cohort):
"""
parsed_ids_as_str = []
individual_splits = subject_ids.split(';')
-
- if cohort == THOUSAND_BRAINS_COHORT:
- for s in individual_splits:
+ zfill_value = 3 if cohort == HCP_COHORT else 4 # zero-padding width, used when expanding ranges
+
+ for s in individual_splits:
+ # if a range was specified
+ if '-' in s:
+ start, end = s.split('-')
+ start_int = int(start)
+ end_int = int(end) + 1 # so that the last element in range is also included
+ ids_list_from_range = list(range(start_int, end_int))
+ parsed_ids_as_str.extend([str(s_id).zfill(zfill_value) for s_id in ids_list_from_range])
+ else:
parsed_ids_as_str.append(s)
- elif cohort == DEFAULT_COHORT:
- parsed_ids = []
- for s in individual_splits:
- # if a range was written
- if '-' in s:
- start, end = s.split('-')
- start_int = int(start)
- end_int = int(end) + 1 # so that the last element in range is also included
- ids_list_from_range = list(range(start_int, end_int))
- parsed_ids.extend(ids_list_from_range)
- else:
- s_int = int(s)
- parsed_ids.append(s_int)
-
- # convert the subject ids list back to strings into the required format
- parsed_ids_as_str = [str(id).zfill(3) for id in parsed_ids]
+ parsed_ids_as_str = sorted(set(parsed_ids_as_str))
return parsed_ids_as_str
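Behaviour sketch of the unified range parsing, using values from the updated tests at the end of this
diff (HCP ids are zero-filled to 3 digits, 1000BRAINS ids to 4; results are sorted and de-duplicated):

    # parse_subject_ids('000-002;010', HCP_COHORT)                -> ['000', '001', '002', '010']
    # parse_subject_ids('0010;0001-0003', THOUSAND_BRAINS_COHORT) -> ['0001', '0002', '0003', '0010']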
@@ -118,7 +119,7 @@ def init_siibra_params(atlas_name, parcellation_name, cohort_name, subject_ids):
:param: cohort_name - name of cohort as str
:param: subject_ids - list of unparsed subject ids given as str
:return: (atlas, parcellation, cohort_name, subject_ids) - tuple containing a siibra atlas object,
- a siibra parcellation object and a cohort name all compatible with each other and a list of parsed ids
+ a siibra parcellation object and a cohort name, all compatible with each other, and a list of parsed ids
"""
# check that the atlas and the parcellation exist within siibra
atlas = siibra.atlases[atlas_name] if atlas_name else None
@@ -137,10 +138,11 @@ def init_siibra_params(atlas_name, parcellation_name, cohort_name, subject_ids):
no_parcellations = len(parcellations)
if no_parcellations < 1:
raise AttributeError(f'No default parcellation was found for atlas {atlas.name}!')
- if no_parcellations > 1:
+ elif no_parcellations > 1:
LOGGER.info(
f'Multiple parcellations were found for atlas {atlas.name}. An arbitrary one will be selected.')
- parcellation = parcellations[0]
+ # select the newest parcellation version
+ parcellation = [p for p in parcellations if p.is_newest_version][0]
if not atlas and parcellation:
LOGGER.warning('A parcellation was provided without an atlas, so a default atlas will be selected.')
@@ -148,7 +150,7 @@ def init_siibra_params(atlas_name, parcellation_name, cohort_name, subject_ids):
no_atlases = len(atlases)
if no_atlases < 1:
raise AttributeError(f'No default atlas containing parcellation {parcellation.name} was found!')
- if no_atlases > 1:
+ elif no_atlases > 1:
LOGGER.info(
f'Multiple atlases containing parcellation {parcellation_name} were found. '
f'An arbitrary one will be selected')
@@ -156,23 +158,24 @@ def init_siibra_params(atlas_name, parcellation_name, cohort_name, subject_ids):
if not atlas and not parcellation:
LOGGER.warning(f'No atlas and no parcellation were provided, so default ones will be selected.')
- atlas = siibra.atlases[DEFAULT_ATLAS]
- parcellation = siibra.parcellations[DEFAULT_PARCELLATION]
+ atlas = siibra.atlases[HUMAN_ATLAS]
+ parcellation = siibra.parcellations[JULICH_3_0]
LOGGER.info(f'Using atlas {atlas.name} and parcellation {parcellation.name}')
- # check the compatibility of cohort and parcellation
- cohort_options = get_cohorts_for_sc(parcellation.name)
- if cohort_name is None:
- cohort_name = DEFAULT_COHORT
- elif cohort_name not in cohort_options:
- raise ValueError(f'The cohort \"{cohort_name}\" is not available for parcellation \"{parcellation.name}\"!')
+ if cohort_name:
+ # check the compatibility of cohort and parcellation
+ cohort_options = get_cohorts_for_sc(parcellation.name)
+ if cohort_name not in cohort_options:
+ raise ValueError(f'The cohort \"{cohort_name}\" is not available for parcellation \"{parcellation.name}\"! '
+ f'Please choose one of the following cohorts: {cohort_options} or change '
+ f'the parcellation.')
+ else:
+ cohort_name = HCP_COHORT # compatible with all parcellations
# check subject ids
if not subject_ids:
- LOGGER.info(
- f'The mean across all connectivities from cohort {cohort_name} will be computed.')
- subject_ids = [None]
+ raise ValueError('Please provide at least one subject ID!')
else:
subject_ids = parse_subject_ids(subject_ids, cohort_name)
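Expected behaviour of the new defaults, as exercised by the updated tests at the end of this diff
(a subject ID is now mandatory):

    # init_siibra_params(None, None, None, '000')
    #     -> (Multilevel Human Atlas, Julich-Brain v3.0.3, 'HCP', ['000'])
    # init_siibra_params(None, None, None, None)
    #     -> raises ValueError('Please provide at least one subject ID!')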
@@ -192,7 +195,6 @@ def get_hemispheres_for_regions(region_names):
for name in region_names:
if 'right' in name:
hemi.append(1)
- # TODO: regions referring to both hemispheres are put in the left hemisphere; change this?
else:
hemi.append(0)
@@ -202,17 +204,17 @@ def get_hemispheres_for_regions(region_names):
def get_regions_positions(regions):
"""
Given a list of siibra regions, compute the positions of their centroids.
- :param: regions - list of siibra region objects
- :return: positions - list of tuples; each tuple represents the position of a region in `regions` and contains
+ :param: regions - list of siibra Regions
+ :return: positions - list of tuples; each tuple represents the position of a region and contains
3 floating point coordinates
"""
LOGGER.info(f'Computing positions for regions')
positions = []
- space = siibra.spaces.MNI_152_ICBM_2009C_NONLINEAR_ASYMMETRIC # commonly used space in the documentation
for r in regions:
- centroid = r.spatial_props(space)['components'][0]['centroid'].coordinate
- positions.append(centroid)
+ space = r.supported_spaces[0] # choose the first space available for this region
+ centroid = r.spatial_props(space=space).components[0].centroid
+ positions.append(centroid.coordinate)
return positions
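For orientation, the siibra 0.4a35 -> 1.0a5 API changes this module adapts to, as used throughout
this diff (old call above, new call below):

    # region.spatial_props(space)['components'][0]['centroid']
    #     -> region.spatial_props(space=space).components[0].centroid
    # feature.get_matrix(subject)
    #     -> [c for c in feature if c.subject == subject][0].data
    # feature.subjects
    #     -> [c.subject for c in feature.elements]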
@@ -228,7 +230,7 @@ def get_connectivity_matrix(parcellation, cohort, subjects, component):
:param: cohort - name of cohort for which we compute the connectivity matrices
:param: subjects - list containing the subject ids as strings
:param: component - enum value specifying the connectivity component we want, weights or tracts
- return: conn_matrices - dict containing the conn. matrices (values) for the specified subject ids (keys)
+ :return: conn_matrices - dict where key is the subject id and value is the conn. matrix
"""
modality = component.value
features = siibra.features.get(parcellation, modality)
@@ -241,18 +243,17 @@ def get_connectivity_matrix(parcellation, cohort, subjects, component):
break
if conn_for_cohort is None:
- LOGGER.info("NO conn_for_cohort was found")
- return conn_matrices
+ raise AttributeError(f"NO {modality} was found for cohort {cohort}")
# for 1000BRAINS cohort, if the user did not specify a suffix (_1, _2), get all the possible ids for that subject
if cohort == THOUSAND_BRAINS_COHORT and subjects is not None:
- subjects = [s for s in sorted(conn_for_cohort.subjects) if any(x in s for x in subjects)]
+ all_subjects = sorted([conn.subject for conn in conn_for_cohort.elements])
+ subjects = [s for s in all_subjects if any(x in s for x in subjects)]
# get the conn. matrices for each specified subject
for s in subjects:
- if s is None:
- s = 'mean'
- matrix = conn_for_cohort.get_matrix(s)
+ conn = [c for c in conn_for_cohort if c.subject == s][0]
+ matrix = conn.data
conn_matrices[s] = matrix
LOGGER.info(f'{component.name} for subject {s} retrieved successfully.')
@@ -288,7 +289,7 @@ def get_structural_connectivities_from_kg(atlas=None, parcellation=None, cohort=
:param: atlas - str specifying the atlas name
:param: parcellation - str specifying the parcellation name
:param: cohort - str specifying the cohort name
- :param: subject_ids - unparsed str specifying the subject ids for which the connectivities will be computed
+ :param: subject_ids - unparsed str specifying the subject ids for which the connectivities will be retrieved
:return: connectivities - dict containing tvb structural Connectivities as values and the subject ids as keys
"""
atlas, parcellation, cohort, subject_ids = init_siibra_params(atlas, parcellation, cohort, subject_ids)
@@ -298,8 +299,6 @@ def get_structural_connectivities_from_kg(atlas=None, parcellation=None, cohort=
# regions are the same for all weights and tract lengths matrices, so they can be computed only once
regions = list(weights.values())[0].index.values
- # because siibra sometimes returns tuples instead of actual regions, change list to contain only regions
- regions = [r[1] if type(r) == tuple else r for r in regions]
region_names = [r.name for r in regions]
hemi = get_hemispheres_for_regions(region_names)
positions = get_regions_positions(regions)
@@ -310,8 +309,8 @@ def get_structural_connectivities_from_kg(atlas=None, parcellation=None, cohort=
tracts_matrix = tracts[subject]
tvb_conn = create_tvb_structural_connectivity(weights_matrix, tracts_matrix, region_names, hemi, positions)
- # structural connectivities stored as dict, to link a functional connectivity with the correct
- # structural connectivity when creating connectivity measures
+ # structural connectivities are stored in a dict keyed by subject id, as the key is needed when computing
+ # connectivity measures
connectivities[subject] = tvb_conn
return connectivities
@@ -321,14 +320,14 @@ def get_structural_connectivities_from_kg(atlas=None, parcellation=None, cohort=
def get_functional_connectivity_matrix(parcellation, cohort, subject):
"""
Get all the functional connectivities for the specified parcellation, cohort and just ONE specific subject;
- In v0.4a35 of siibra, functional connectivities belonging to the same cohort can be split into multiple (5)
- siibra FunctionalConnectivity objects
+ In siibra v1.0a5, the HCP cohort has 5 groups of functional connectivities; each group contains
+ one Functional Connectivity for each subject addressed in the study
:param: parcellation - siibra Parcellation object
:param: cohort - str specifying the cohort name
:param: subject - str specifying exactly one subject id
:return: (fcs_list, fcs_names_list) - tuple containing 2 lists; `fcs_list` contains pandas.Dataframe matrices and
`fcs_names_list` contains the name for each matrix from the previous list, obtained from the file they are stored
- in in the KG
+ in the KG
"""
modality = Component2Modality.FUNCTIONAL_CONNECTIVITY.value
features = siibra.features.get(parcellation, modality)
@@ -337,39 +336,26 @@ def get_functional_connectivity_matrix(parcellation, cohort, subject):
for f in features:
if f.cohort == cohort:
- fc_matrix = f.get_matrix(subject)
- fcs_names_list.append(f._files[subject])
+ f_conn = [c for c in f.elements if c.subject == subject][0]
+ fc_matrix = f_conn.data
+ fcs_names_list.append(f_conn.name)
fcs_list.append(fc_matrix)
return fcs_list, fcs_names_list
-def get_fc_name_from_file_path(path_to_file):
- """
- Given the entire path to a file containing a siibra FunctionalConnectivity, return just the filename
- Note: highly dependent on KG/siibra storage conventions
- :param: path_to_file - str representing the path to a Functional Connectivity from the KG
- :return: filename - just the filename (without the extension)
- """
- file_with_extension = path_to_file.rsplit('/', 1)[1]
- filename = file_with_extension.rsplit('.', 1)[0]
-
- return filename
-
-
-def create_tvb_connectivity_measure(siibra_fc, structural_connectivity, siibra_fc_filename):
+def create_tvb_connectivity_measure(siibra_fc, structural_connectivity, siibra_fc_name):
"""
Given a FunctionalConnectivity from siibra TVB Structural Connectivity (both for the same subject),
return a TVB ConnectivityMeasure containing those 2 connectivities
:param: siibra_fc - pandas.Dataframe matrix from siibra containing a functional connectivity
:param: structural_connectivity - a TVB structural connectivity
- :param: siibra_fc_filename - the name of the file containing the functional connectivity from siibra
+ :param: siibra_fc_name - the name of the siibra functional connectivity object
:return: conn_measure - tvb.datatypes.graph.ConnectivityMeasure representing a functional connectivity
"""
fc_matrix = siibra_fc.to_numpy()
conn_measure = ConnectivityMeasure(array_data=fc_matrix, connectivity=structural_connectivity)
- conn_measure.title = get_fc_name_from_file_path(siibra_fc_filename)
-
+ conn_measure.title = siibra_fc_name
return conn_measure
@@ -381,7 +367,7 @@ def get_connectivity_measures_from_kg(atlas=None, parcellation=None, cohort=None
:param: atlas - str specifying the atlas name
:param: parcellation - str specifying the parcellation name
:param: cohort - str specifying the cohort name
- :param: subject_ids - unparsed str specifying the subject ids for which the connectivities will be computed
+ :param: subject_ids - unparsed str specifying the subject ids for which the connectivities will be retrieved
:param: structural_connectivities - dict of TVB Structural Connectivities computed for the subjects from
`subject_ids`, where subject ids are keys and the structural connectivities are values
:return: conn_measures - dict containing TVB Connectivity Measures as values and the subject ids as keys
@@ -392,14 +378,11 @@ def get_connectivity_measures_from_kg(atlas=None, parcellation=None, cohort=None
# for 1000BRAINS cohort, if the user did not specify a suffix (_1, _2), get all the possible ids for that subject
if cohort == THOUSAND_BRAINS_COHORT and any('_' not in s for s in subject_ids):
f = [f for f in siibra.features.get(parcellation, siibra.features.connectivity.FunctionalConnectivity)
- if f.cohort == THOUSAND_BRAINS_COHORT]
- f = f[0] # get the first feature from the feature list, we just want to know the subject names
- subjects_for_cohort = f.subjects
+ if f.cohort == THOUSAND_BRAINS_COHORT][0] # there is only one FC group for the 1000BRAINS cohort
+ subjects_for_cohort = [f_conn.subject for f_conn in f]
subject_ids = [s for s in sorted(subjects_for_cohort) if any(x in s for x in subject_ids)]
for s in subject_ids:
- if s is None:
- s = 'mean'
conn_measures[s] = []
sc = structural_connectivities[s]
@@ -414,14 +397,14 @@ def get_connectivity_measures_from_kg(atlas=None, parcellation=None, cohort=None
# ################################################# FINAL API ##########################################################
-def get_connectivities_from_kg(atlas=None, parcellation=None, cohort=DEFAULT_COHORT,
+def get_connectivities_from_kg(atlas=None, parcellation=None, cohort=HCP_COHORT,
subject_ids=None, compute_fc=False):
"""
Compute the TVB Structural Connectivities and optionally Functional Connectivities for the selected subjects
:param: atlas - str specifying the atlas name
:param: parcellation - str specifying the parcellation name
:param: cohort - str specifying the cohort name
- :param: subject_ids - unparsed str specifying the subject ids for which the connectivities will be computed
+ :param: subject_ids - unparsed str specifying the subject ids for which the connectivities will be retrieved
:param: compute_fc - boolean value indicating if for the specified subjects the functional connectivities should
also be retrieved
:return: (sc_dict, conn_measures_dict) - tuple containing 2 dictionaries: one with structural connectivities and
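Illustrative call of the final API, mirroring the renamed demo notebook above (keys are subject ids,
values are TVB datatypes):

    # from tvb.adapters.creators.siibra_base import get_connectivities_from_kg
    # struct_conn_dict, func_conn_dict = get_connectivities_from_kg('human', 'julich 3', 'HCP', '000-001', True)
    # struct_conn_dict['000']   # -> tvb.datatypes.connectivity.Connectivity for subject '000'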
diff --git a/tvb_framework/tvb/adapters/creators/siibra_creator.py b/tvb_framework/tvb/adapters/creators/siibra_creator.py
index 416724c688..305cd563b9 100644
--- a/tvb_framework/tvb/adapters/creators/siibra_creator.py
+++ b/tvb_framework/tvb/adapters/creators/siibra_creator.py
@@ -41,25 +41,20 @@
from tvb.core.adapters.abcadapter import ABCAdapterForm, ABCAdapter
from tvb.core.neotraits.forms import StrField, SelectField, BoolField, UserSessionStrField
from tvb.core.neotraits.view_model import ViewModel, Str
-from tvb.core.services.user_service import KEY_AUTH_TOKEN
-
-CLB_AUTH_TOKEN_KEY = 'HBP_AUTH_TOKEN'
# Following code is executed only once, when the application starts running
def init_siibra_options():
""""
- Initialize siibra options for atlas and parcellations
+ Initialize siibra options for atlas, parcellations and cohorts.
+ These options were selected manually, as they are currently the only ones providing structural
+ connectivities. This may change in the future.
"""
- # should use `atlases = [a.name for a in list(siibra.atlases)]`, but only the default one has data
- atlases = [siibra_base.DEFAULT_ATLAS] # list with atlases names
- # should get only valid parcellations for default atlas, but only newest version of Julich parcellation
- # has data and corresponds with the current API of siibra
- parcellations = [siibra_base.DEFAULT_PARCELLATION]
-
- # get available cohorts
- cohorts = siibra_base.get_cohorts_for_sc(parcellations[0])
+ atlases = [siibra_base.HUMAN_ATLAS]
+ parcellations = [siibra_base.JULICH_3_0, siibra_base.JULICH_2_9]
+ cohorts = [siibra_base.HCP_COHORT, siibra_base.THOUSAND_BRAINS_COHORT]
+ # create dicts needed for TVB Enums
atlas_dict = {a_name: a_name for a_name in atlases}
parcellation_dict = {p_name: p_name for p_name in parcellations}
cohort_dict = {(y := c_name.upper()): y for c_name in cohorts}
@@ -77,31 +72,25 @@ def init_siibra_options():
class SiibraModel(ViewModel):
- ebrains_token = Str(
- label='EBRAINS token',
- required=True,
- doc='Auth Token provided by EBRAINS lab `clb_oauth.get_token()` for accessing the Knowledge Graph'
- )
-
atlas = EnumAttr(
field_type=ATLAS_OPTS,
- default=ATLAS_OPTS[siibra_base.DEFAULT_ATLAS],
+ default=ATLAS_OPTS[siibra_base.HUMAN_ATLAS],
label='Atlas',
required=True,
- doc='Atlas to be used (only the compatible ones listed)'
+ doc='Atlas to be used'
)
parcellation = EnumAttr(
field_type=PARCELLATION_OPTS,
- default=PARCELLATION_OPTS[siibra_base.DEFAULT_PARCELLATION],
+ default=PARCELLATION_OPTS[siibra_base.JULICH_3_0],
label='Parcellation',
required=True,
- doc='Parcellation to be used (only TVB compatible ones listed here)'
+ doc='Parcellation to be used'
)
cohort = EnumAttr(
field_type=COHORT_OPTS,
- default=COHORT_OPTS[siibra_base.DEFAULT_COHORT],
+ default=COHORT_OPTS[siibra_base.HCP_COHORT],
label='Cohort',
required=True,
doc='Cohort to be used'
@@ -120,13 +109,19 @@ class SiibraModel(ViewModel):
subject 000 until subject 050 (51 subjects).
A combination of the 2 methods is also supported: 000-005;010 will retrieve all the subjects starting with
subject 000 until subject 005 (6 subjects) AND subject 010 (so 7 subjects in total)
- b) For "1000BRAINS" cohort, the subject IDs are: 0001_1, 0001_2, 0002_1, 0002_2, etc. Each subject can have
- multiple subjects IDs associated to them, indicated by the "_1", "_2" suffix, but most of subjects have
- just one ID, ending in "_1". Thus, there are 2 ways to specify the IDs:
- 1. individually and specifying the exact ID, so including "_1" or "_2". Multiple IDs can be mentioned
- by using a semicolon symbol to delimitate them: 0001_1;0017_1;0017_2.
- 2. individually, and specifying just the prefix for a subject. Multiple IDs can be mentioned by using a
- semicolon symbol to delimitate them: 0001;0017 will be converted to 4 IDs: 0001_1, 0001_2, 0017_1, 0017_2.
+ b) For "1000BRAINS" cohort, the subject IDs have to parts: first part is the subject ID, which has the form:
+ 0001, 0002, etc., and the second part is the scanning session index, which has the form _1, _2. All subjects
+ had between 1 and 2 scanning sessions. Thus, the final IDs will look like: 0001_1, 0001_2, 0002_1, etc. and
+ there are 3 ways to specify the IDs:
+ 1. individually and specifying the exact ID, including the session index "_1" or "_2". Multiple IDs can be
+ mentioned by using a semicolon symbol to delimit them: 0001_1;0017_1;0017_2.
+ 2. individually, and without specifying the session index. In this case, all available sessions for that subject
+ will be retrieved. Multiple IDs can be mentioned by using a semicolon symbol to delimit them: 0001;0017 will be
+ converted to 4 IDs: 0001_1, 0001_2, 0017_1, 0017_2.
+ 3. as a range, specifying only the subject IDs and not the session indices: 0001-0003 will retrieve all the
+ available sessions for subjects 1, 2, 3, i.e.: 0001_1, 0001_2, 0002_1, 0002_2, 0003_1 and 0003_2.
+ A combination of the 3 methods is also supported: 0001-0003;0005_1;0009 will retrieve connectivities for the
+ following IDs: 0001_1, 0001_2, 0002_1, 0002_2, 0003_1, 0003_2, 0005_1, 0009_1, 0009_2.
""")
fc = Attr(
@@ -134,14 +129,13 @@ class SiibraModel(ViewModel):
label="Compute Functional Connectivities",
default=True,
required=True,
- doc="Set if the functional connectivities for the specified subjects should also be computed"
+ doc="Flag to specify if the functional connectivities for the selected subjects should also be computed"
)
class SiibraCreatorForm(ABCAdapterForm):
def __init__(self):
super(SiibraCreatorForm, self).__init__()
- self.ebrains_token = UserSessionStrField(SiibraModel.ebrains_token, name="ebrains_token", key=KEY_AUTH_TOKEN)
self.atlas = SelectField(SiibraModel.atlas, name='atlas')
self.parcellation = SelectField(SiibraModel.parcellation, name='parcellation')
self.cohort = SelectField(SiibraModel.cohort, name='cohort')
@@ -169,7 +163,7 @@ class SiibraCreator(ABCAdapter):
""" The purpose of this creator is to use siibra in order to create Structural and Functional Connectivities """
_ui_name = "Siibra Connectivity Creator"
- _ui_description = "Create Structural and Functional Connectivities from the EBRAINS KG using siibra"
+ _ui_description = "Create Structural and Functional Connectivities with data from the EBRAINS KG using siibra"
def get_form_class(self):
return SiibraCreatorForm
@@ -178,29 +172,24 @@ def get_output(self):
return [ConnectivityIndex, ConnectivityMeasureIndex]
def launch(self, view_model):
- ebrains_token = view_model.ebrains_token
atlas = view_model.atlas.value
parcellation = view_model.parcellation.value
cohort = view_model.cohort.value
subject_ids = view_model.subject_ids
compute_fc = view_model.fc
- os.environ[CLB_AUTH_TOKEN_KEY] = ebrains_token
-
# list of all resulting indices for connectivities and possibly connectivity measures
results = []
try:
conn_dict, conn_measures_dict = siibra_base.get_connectivities_from_kg(atlas, parcellation, cohort,
subject_ids, compute_fc)
- except SiibraHttpRequestError as e:
- if e.response.status_code in [401, 403]:
- raise ConnectionError('Invalid EBRAINS authentication token. Please provide a new one.')
- else:
- raise ConnectionError('We could not complete the operation. '
- 'Please check the logs and contact the development team from TVB, siibra or EBRAINS KG.')
-
- # list of indexes for stored the Struct. Conn. and Conn. Measures
+ except SiibraHttpRequestError:
+ raise ConnectionError('We could not complete the operation. '
+ 'Please check the logs and contact the TVB, siibra or EBRAINS KG '
+ 'development teams.')
+
+ # list of indices of stored Struct. Conn. and Conn. Measures
conn_indices = []
conn_measures_indices = []
diff --git a/tvb_framework/tvb/tests/framework/adapters/creators/siibra_base_test.py b/tvb_framework/tvb/tests/framework/adapters/creators/siibra_base_test.py
index 017575c222..22ce136c08 100644
--- a/tvb_framework/tvb/tests/framework/adapters/creators/siibra_base_test.py
+++ b/tvb_framework/tvb/tests/framework/adapters/creators/siibra_base_test.py
@@ -31,36 +31,26 @@
import pytest
import siibra
from tvb.adapters.creators import siibra_base as sb
-from tvb.adapters.creators.siibra_creator import CLB_AUTH_TOKEN_KEY
from tvb.datatypes import connectivity, graph
from tvb.tests.framework.core.base_testcase import BaseTestCase
-
-def no_ebrains_auth_token():
- hbp_auth = os.environ.get(CLB_AUTH_TOKEN_KEY)
- client_id = os.environ.get("KEYCLOAK_CLIENT_ID")
- # KEYCLOAK_CLIENT_ID has priority for us in unit-tests
- if client_id and CLB_AUTH_TOKEN_KEY in os.environ:
- del os.environ[CLB_AUTH_TOKEN_KEY]
- return (not hbp_auth) and (not client_id)
-
-
HUMAN_ATLAS = 'Multilevel Human Atlas'
-MONKEY_ATLAS = 'Monkey Atlas (pre-release)'
-JULICH_PARCELLATION = 'Julich-Brain Cytoarchitectonic Maps 2.9'
-MONKEY_PARCELLATION = 'Non-human primate'
+MONKEY_ATLAS = 'Monkey Atlas'
+JULICH_PARCELLATION_3_0 = 'Julich 3'
+JULICH_PARCELLATION_2_9 = 'Julich 2.9'
+MONKEY_PARCELLATION = 'MEBRAINS population-based monkey parcellation'
DEFAULT_HCP_SUBJECT = ['000']
DEFAULT_1000BRAINS_SUBJECT = ['0001_1']
-@pytest.mark.skipif(no_ebrains_auth_token(), reason="No EBRAINS AUTH token for accesing the KG was provided!")
class TestSiibraBase(BaseTestCase):
@pytest.fixture()
def create_test_atlases_and_parcellations(self):
self.human_atlas = siibra.atlases[HUMAN_ATLAS]
self.monkey_atlas = siibra.atlases[MONKEY_ATLAS]
- self.julich_parcellation = siibra.parcellations[JULICH_PARCELLATION]
+ self.julich_parcellation_3_0 = siibra.parcellations[JULICH_PARCELLATION_3_0]
+ self.julich_parcellation_2_9 = siibra.parcellations[JULICH_PARCELLATION_2_9]
self.monkey_parcellation = siibra.parcellations[MONKEY_PARCELLATION]
@pytest.fixture()
@@ -68,8 +58,8 @@ def create_weights_and_tracts(self, create_test_atlases_and_parcellations):
"""
Return all the weights and tracts available in siibra for default atlas and parcellation
"""
- weights = siibra.get_features(self.julich_parcellation, siibra.modalities.StreamlineCounts)
- tracts = siibra.get_features(self.julich_parcellation, siibra.modalities.StreamlineLengths)
+ weights = siibra.get_features(self.julich_parcellation_3_0, siibra.modalities.StreamlineCounts)
+ tracts = siibra.get_features(self.julich_parcellation_3_0, siibra.modalities.StreamlineLengths)
self.weights = weights
self.tracts = tracts
@@ -78,17 +68,20 @@ def create_siibra_functional_connectivities(self, create_test_atlases_and_parcel
"""
Return all the functional connectivities available in siibra for default atlas and parcellation
"""
- features = siibra.features.get(self.julich_parcellation, siibra.features.connectivity.FunctionalConnectivity)
- f = features[0]
- fc = f.get_matrix()
+ features = siibra.features.get(self.julich_parcellation_3_0,
+ siibra.features.connectivity.FunctionalConnectivity)
+ features_for_cohort = features[0]
+ f = features_for_cohort[0]
+ fc = f.data
self.fc = fc
def test_check_atlas_parcellation_compatible(self, create_test_atlases_and_parcellations):
- assert sb.check_atlas_parcellation_compatible(self.human_atlas, self.julich_parcellation)
- assert not sb.check_atlas_parcellation_compatible(self.monkey_atlas, self.julich_parcellation)
+ assert sb.check_atlas_parcellation_compatible(self.human_atlas, self.julich_parcellation_3_0)
+ assert sb.check_atlas_parcellation_compatible(self.monkey_atlas, self.monkey_parcellation)
+ assert not sb.check_atlas_parcellation_compatible(self.monkey_atlas, self.julich_parcellation_3_0)
def test_get_atlases_for_parcellation(self, create_test_atlases_and_parcellations):
- atlas_list = sb.get_atlases_for_parcellation(self.julich_parcellation)
+ atlas_list = sb.get_atlases_for_parcellation(self.julich_parcellation_3_0)
assert atlas_list
assert self.human_atlas in atlas_list
assert self.monkey_atlas not in atlas_list
@@ -96,25 +89,34 @@ def test_get_atlases_for_parcellation(self, create_test_atlases_and_parcellation
def test_get_parcellations_for_atlas(self, create_test_atlases_and_parcellations):
parcellation_list = sb.get_parcellations_for_atlas(self.human_atlas)
assert parcellation_list
- assert self.julich_parcellation in parcellation_list
+ assert self.julich_parcellation_3_0 in parcellation_list
assert self.monkey_parcellation not in parcellation_list
+ def test_get_cohorts_for_sc(self, create_test_atlases_and_parcellations):
+ cohorts_julich_3 = sb.get_cohorts_for_sc(self.julich_parcellation_3_0)
+ assert sb.HCP_COHORT in cohorts_julich_3
+ assert sb.THOUSAND_BRAINS_COHORT not in cohorts_julich_3
+
+ cohorts_julich_2_9 = sb.get_cohorts_for_sc(self.julich_parcellation_2_9)
+ assert sb.HCP_COHORT in cohorts_julich_2_9
+ assert sb.THOUSAND_BRAINS_COHORT in cohorts_julich_2_9
+
def test_parse_subject_ids(self):
# for HCP cohort
single_id = '000'
- assert sb.parse_subject_ids(single_id, sb.DEFAULT_COHORT) == ['000']
+ assert sb.parse_subject_ids(single_id, sb.HCP_COHORT) == ['000']
multiple_ids = '000;010'
- assert sb.parse_subject_ids(multiple_ids, sb.DEFAULT_COHORT) == ['000', '010']
+ assert sb.parse_subject_ids(multiple_ids, sb.HCP_COHORT) == ['000', '010']
range_ids = '000-002'
- assert sb.parse_subject_ids(range_ids, sb.DEFAULT_COHORT) == ['000', '001', '002']
+ assert sb.parse_subject_ids(range_ids, sb.HCP_COHORT) == ['000', '001', '002']
range_and_multiple_ids = '000-002;010'
- assert sb.parse_subject_ids(range_and_multiple_ids, sb.DEFAULT_COHORT) == ['000', '001', '002', '010']
+ assert sb.parse_subject_ids(range_and_multiple_ids, sb.HCP_COHORT) == ['000', '001', '002', '010']
range_and_multiple_ids2 = '100;000-002;010'
- assert sb.parse_subject_ids(range_and_multiple_ids2, sb.DEFAULT_COHORT) == ['100', '000', '001', '002', '010']
+ assert sb.parse_subject_ids(range_and_multiple_ids2, sb.HCP_COHORT) == ['000', '001', '002', '010', '100']
# for 1000BRAINS cohort
single_full_id = '0001_1'
@@ -129,35 +131,53 @@ def test_parse_subject_ids(self):
multiple_partial_ids = '0010;0017'
assert sb.parse_subject_ids(multiple_partial_ids, sb.THOUSAND_BRAINS_COHORT) == ['0010', '0017']
+ range_ids = '0001-0003'
+ assert sb.parse_subject_ids(range_ids, sb.THOUSAND_BRAINS_COHORT) == ['0001', '0002', '0003']
+
+ range_and_ids = '0010;0001-0003;0017'
+ assert sb.parse_subject_ids(range_and_ids, sb.THOUSAND_BRAINS_COHORT) == ['0001', '0002', '0003', '0010',
+ '0017']
+
def test_init_siibra_params_no_selections(self, create_test_atlases_and_parcellations):
""""
Test initialization of siibra paramas when no sellection was made for atlas, parcellation or subject ids
"""
- empty_params_config = sb.init_siibra_params(None, None, None, None)
+ with pytest.raises(ValueError) as error_info:
+ sb.init_siibra_params(None, None, None, None)
+ assert str(error_info.value) == 'Please provide at least one subject ID!'
+
+ def test_init_siibra_params_for_defaults(self, create_test_atlases_and_parcellations):
+ """"
+ Test initialization of siibra paramas when no sellection was made for atlas, parcellation and cohort, but
+ a subject ID was mentioned
+ """
+ # subject id is mandatory, otherwise execption
+ empty_params_config = sb.init_siibra_params(None, None, None, '000')
atlas, parcellation, cohort, subject_ids = empty_params_config
assert atlas == self.human_atlas
- assert parcellation == self.julich_parcellation
- assert cohort == sb.DEFAULT_COHORT
- assert subject_ids == [None]
+ assert parcellation == self.julich_parcellation_3_0
+ assert cohort == sb.HCP_COHORT
+ assert subject_ids == ['000']
def test_init_siibra_params_atlas_selected(self, create_test_atlases_and_parcellations):
""""
Test initialization of siibra paramas when only the atlas was selected
"""
- _, parcellation, cohort, subject_ids = sb.init_siibra_params(self.human_atlas, None, None, None)
+ _, parcellation, cohort, subject_ids = sb.init_siibra_params(self.human_atlas, None, None, '000')
assert parcellation is not None
+ assert parcellation is self.julich_parcellation_3_0
assert parcellation in list(self.human_atlas.parcellations)
- assert cohort == sb.DEFAULT_COHORT
- assert subject_ids == [None]
+ assert cohort == sb.HCP_COHORT
+ assert subject_ids == ['000']
def test_init_siibra_params_parcellation_selected(self, create_test_atlases_and_parcellations):
""""
Test initialization of siibra paramas when only the parcellation was selected
"""
- atlas, _, cohort, subject_ids = sb.init_siibra_params(None, self.julich_parcellation, None, None)
+ atlas, _, cohort, subject_ids = sb.init_siibra_params(None, self.julich_parcellation_2_9, None, '000')
assert atlas == self.human_atlas
- assert cohort == sb.DEFAULT_COHORT
- assert subject_ids == [None]
+ assert cohort == sb.HCP_COHORT
+ assert subject_ids == ['000']
def test_init_siibra_params_subjects_selected(self, create_test_atlases_and_parcellations):
""""
@@ -165,34 +185,33 @@ def test_init_siibra_params_subjects_selected(self, create_test_atlases_and_parc
"""
atlas, parcellation, cohort, subject_ids = sb.init_siibra_params(None, None, None, '000;001')
assert atlas == self.human_atlas
- assert parcellation is not None
assert parcellation in list(atlas.parcellations)
- assert cohort == sb.DEFAULT_COHORT
+ assert cohort == sb.HCP_COHORT
assert subject_ids == ['000', '001']
def test_get_connectivity_matrix(self, create_test_atlases_and_parcellations):
"""
Test the retrieval of structural connectivities (weights and tracts) and functional connectivities
"""
- weights = sb.get_connectivity_matrix(self.julich_parcellation, sb.DEFAULT_COHORT, DEFAULT_HCP_SUBJECT,
+ weights = sb.get_connectivity_matrix(self.julich_parcellation_3_0, sb.HCP_COHORT, DEFAULT_HCP_SUBJECT,
sb.Component2Modality.WEIGHTS)
assert len(weights) > 0
assert DEFAULT_HCP_SUBJECT[0] in weights
- assert type(weights[DEFAULT_HCP_SUBJECT[0]]) == pandas.core.frame.DataFrame
+ assert isinstance(weights[DEFAULT_HCP_SUBJECT[0]], pandas.core.frame.DataFrame)
- tracts = sb.get_connectivity_matrix(self.julich_parcellation, sb.DEFAULT_COHORT, DEFAULT_HCP_SUBJECT,
+ tracts = sb.get_connectivity_matrix(self.julich_parcellation_3_0, sb.HCP_COHORT, DEFAULT_HCP_SUBJECT,
sb.Component2Modality.TRACTS)
assert len(tracts) > 0
assert DEFAULT_HCP_SUBJECT[0] in tracts
- assert type(tracts[DEFAULT_HCP_SUBJECT[0]]) == pandas.core.frame.DataFrame
+ assert isinstance(tracts[DEFAULT_HCP_SUBJECT[0]], pandas.core.frame.DataFrame)
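+ # get_connectivity_matrix returns a dict keyed by subject ID with pandas DataFrames
+ # as values; Component2Modality selects between WEIGHTS and TRACTS. A usage sketch
+ # with the names above: weights[DEFAULT_HCP_SUBJECT[0]].to_numpy() yields the raw matrix.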
def test_get_functional_connectivity_matrix(self, create_test_atlases_and_parcellations):
- fcs, fcs_names = sb.get_functional_connectivity_matrix(self.julich_parcellation, sb.DEFAULT_COHORT,
+ fcs, fcs_names = sb.get_functional_connectivity_matrix(self.julich_parcellation_3_0, sb.HCP_COHORT,
DEFAULT_HCP_SUBJECT[0])
assert len(fcs) > 0
assert len(fcs_names) > 0
assert len(fcs) == len(fcs_names)
- assert type(fcs[0]) == pandas.core.frame.DataFrame
+ assert isinstance(fcs[0], pandas.core.frame.DataFrame)
def test_get_hemispheres_for_regions(self):
reg_names = ['reg1_right', 'reg1_left', 'reg_2']
@@ -200,7 +219,7 @@ def test_get_hemispheres_for_regions(self):
assert hemi == [1, 0, 0]
def test_get_regions_positions(self, create_test_atlases_and_parcellations):
- region = self.julich_parcellation.get_region('v1')
+ region = self.julich_parcellation_3_0.get_region('v1')
assert region.name == 'Area hOc1 (V1, 17, CalcS)'
reg_coord = sb.get_regions_positions([region])[0]
assert len(reg_coord) == 3
@@ -227,63 +246,49 @@ def test_create_tvb_structural_connectivity(self):
assert (tvb_conn.hemispheres == hemi).all()
def test_get_structural_connectivities_from_kg(self, create_test_atlases_and_parcellations):
- tvb_conns = sb.get_structural_connectivities_from_kg(self.human_atlas, self.julich_parcellation,
- sb.DEFAULT_COHORT, '001')
+ tvb_conns = sb.get_structural_connectivities_from_kg(self.human_atlas, self.julich_parcellation_2_9,
+ sb.THOUSAND_BRAINS_COHORT, '0001_1')
assert len(tvb_conns) == 1
- assert list(tvb_conns.keys()) == ['001']
- assert type(tvb_conns['001']) == connectivity.Connectivity
-
- def test_get_connectivity_measures_from_kg(self, create_test_atlases_and_parcellations):
- struct_conn = sb.get_structural_connectivities_from_kg(self.human_atlas, self.julich_parcellation,
- sb.THOUSAND_BRAINS_COHORT, '0017')
-
- tvb_conn_measures = sb.get_connectivity_measures_from_kg(self.human_atlas, self.julich_parcellation,
- sb.THOUSAND_BRAINS_COHORT, '0017', struct_conn)
-
- assert len(tvb_conn_measures) == 2
- assert list(tvb_conn_measures.keys()) == ['0017_1', '0017_2']
- assert type(tvb_conn_measures['0017_1'][0]) == graph.ConnectivityMeasure
-
- def test_get_fc_name_from_file_path(self):
- path = 'c/users/user1/FunctionalConnectivity.Name.csv'
- name = 'FunctionalConnectivity.Name'
-
- assert sb.get_fc_name_from_file_path(path) == name
+ assert list(tvb_conns.keys()) == ['0001_1']
+ assert isinstance(tvb_conns['0001_1'], connectivity.Connectivity)
def test_create_tvb_connectivity_measure(self, create_siibra_functional_connectivities):
conn = connectivity.Connectivity.from_file("connectivity_192.zip")
fc = self.fc
# the FC and SC are not compatible, but are used together only for testing purposes
- tvb_conn_measure = sb.create_tvb_connectivity_measure(fc, conn, 'c/users/user1/FunctionalConnectivity.Name.csv')
+ tvb_conn_measure = sb.create_tvb_connectivity_measure(fc, conn, 'StreamlineCounts (StreamlineCounts) anchored '
+ 'at Julich-Brain Cytoarchitectonic Atlas '
+ '(v2.9) with cohort HCP - 000')
assert (tvb_conn_measure.array_data == fc.to_numpy()).all()
assert tvb_conn_measure.connectivity is conn
- assert tvb_conn_measure.title == 'FunctionalConnectivity.Name'
+ assert tvb_conn_measure.title == ('StreamlineCounts (StreamlineCounts) anchored at Julich-Brain '
+ 'Cytoarchitectonic Atlas (v2.9) with cohort HCP - 000')
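+ # Note: with siibra 1.0a5 the measure title is the siibra feature description used
+ # verbatim (the long string above) rather than a name parsed out of a CSV file path,
+ # which is why get_fc_name_from_file_path and its test were removed.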
def test_get_connectivity_measures_from_kg(self, create_test_atlases_and_parcellations):
sc1 = connectivity.Connectivity.from_file("connectivity_76.zip")
scs = {'001': sc1}
- tvb_conn_measures = sb.get_connectivity_measures_from_kg(self.human_atlas, self.julich_parcellation,
- sb.DEFAULT_COHORT, '001', scs)
+ tvb_conn_measures = sb.get_connectivity_measures_from_kg(self.human_atlas, self.julich_parcellation_3_0,
+ sb.HCP_COHORT, '001', scs)
assert len(tvb_conn_measures) == 1
assert len(tvb_conn_measures['001']) == 5
assert (list(tvb_conn_measures.keys()) == ['001'])
- assert type(tvb_conn_measures['001'][0]) == graph.ConnectivityMeasure
+ assert isinstance(tvb_conn_measures['001'][0], graph.ConnectivityMeasure)
assert tvb_conn_measures['001'][0].connectivity is sc1
sc2 = connectivity.Connectivity.from_file("connectivity_66.zip")
sc3 = connectivity.Connectivity.from_file("connectivity_68.zip")
scs2 = {'0017_1': sc2, '0017_2': sc3}
- tvb_conn_measures2 = sb.get_connectivity_measures_from_kg(self.human_atlas, self.julich_parcellation,
+ tvb_conn_measures2 = sb.get_connectivity_measures_from_kg(self.human_atlas, self.julich_parcellation_2_9,
sb.THOUSAND_BRAINS_COHORT, '0017', scs2)
assert len(tvb_conn_measures2) == 2
assert (list(tvb_conn_measures2.keys()) == ['0017_1', '0017_2'])
assert len(tvb_conn_measures2['0017_1']) == 1
- assert type(tvb_conn_measures2['0017_1'][0]) == graph.ConnectivityMeasure
+ assert isinstance(tvb_conn_measures2['0017_1'][0], graph.ConnectivityMeasure)
assert tvb_conn_measures2['0017_1'][0].connectivity is sc2
assert tvb_conn_measures2['0017_2'][0].connectivity is sc3
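+ # Keying convention, as asserted above: HCP measures are keyed by the plain subject
+ # ID ('001'), while 1000BRAINS entries carry suffixes ('0017_1', '0017_2'), presumably
+ # distinguishing multiple acquisitions for the same subject.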
@@ -291,27 +296,27 @@ def test_get_connectivities_from_kg_no_fc(self, create_test_atlases_and_parcella
"""
Test retrieval of just structural connectivities
"""
- scs, fcs = sb.get_connectivities_from_kg(self.human_atlas, self.julich_parcellation, sb.DEFAULT_COHORT, '001')
+ scs, fcs = sb.get_connectivities_from_kg(self.human_atlas, self.julich_parcellation_3_0, sb.HCP_COHORT, '001')
assert len(scs) == 1
assert not fcs
assert (list(scs.keys()) == ['001'])
- assert type(scs['001']) == connectivity.Connectivity
+ assert isinstance(scs['001'], connectivity.Connectivity)
def test_get_connectivities_from_kg_with_fc(self, create_test_atlases_and_parcellations):
"""
Test retrieval of both structural and functional connectivities
"""
- scs, fcs = sb.get_connectivities_from_kg(self.human_atlas, self.julich_parcellation, sb.DEFAULT_COHORT,
- '001', True)
+ scs, fcs = sb.get_connectivities_from_kg(self.human_atlas, self.julich_parcellation_3_0, sb.HCP_COHORT, '001',
+ True)
assert len(scs) == 1
assert len(fcs) == 1
assert len(fcs['001']) == 5
assert (list(scs.keys()) == ['001'])
- assert type(scs['001']) == connectivity.Connectivity
+ assert isinstance(scs['001'], connectivity.Connectivity)
assert (list(fcs.keys()) == ['001'])
- assert type(fcs['001'][4]) == graph.ConnectivityMeasure
+ assert isinstance(fcs['001'][4], graph.ConnectivityMeasure)
diff --git a/tvb_framework/tvb/tests/framework/adapters/creators/siibra_creator_test.py b/tvb_framework/tvb/tests/framework/adapters/creators/siibra_creator_test.py
index 87bca85a9d..f431da96bb 100644
--- a/tvb_framework/tvb/tests/framework/adapters/creators/siibra_creator_test.py
+++ b/tvb_framework/tvb/tests/framework/adapters/creators/siibra_creator_test.py
@@ -23,17 +23,11 @@
# https://www.thevirtualbrain.org/tvb/zwei/neuroscience-publications
#
#
-import os
-import pytest
-from siibra.retrieval.requests import EbrainsRequest
-from tvb.adapters.creators.siibra_creator import SiibraCreator, SiibraModel, CLB_AUTH_TOKEN_KEY
-from tvb.tests.framework.adapters.creators import siibra_base_test
+from tvb.adapters.creators.siibra_creator import SiibraCreator, SiibraModel
from tvb.tests.framework.core.base_testcase import TransactionalTestCase
from tvb.tests.framework.core.factory import TestFactory
-@pytest.mark.skipif(siibra_base_test.no_ebrains_auth_token(),
- reason="No EBRAINS AUTH token for accessing the KG was provided!")
class TestSiibraCreator(TransactionalTestCase):
""" Test Siibra Creator functionalities """
@@ -44,15 +38,6 @@ def transactional_setup_method(self):
def test_happy_flow_launch(self, operation_factory):
view_model = SiibraModel()
- if CLB_AUTH_TOKEN_KEY in os.environ:
- view_model.ebrains_token = os.environ[CLB_AUTH_TOKEN_KEY]
- else:
- # This path might be too white-box, as we are replicating siibra mechanism of retrieving
- # an EBRAINS Token based on CLIENT_SECRET and CLIENT_ID, but this way we keep the SiibraCreator
- # both tested and compatible with OpenShift deployments, where a token is provided directly
- req = EbrainsRequest("", {})
- req.init_oidc()
- view_model.ebrains_token = req.kg_token
view_model.subject_ids = '010'
operation = operation_factory(test_user=self.test_user, test_project=self.test_project)
@@ -66,11 +51,11 @@ def test_happy_flow_launch(self, operation_factory):
# connectivities
assert conn_index.has_hemispheres_mask
- assert conn_index.number_of_regions == 294
+ assert conn_index.number_of_regions == 314
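+ # The jump from 294 to 314 regions presumably reflects the default parcellation
+ # moving from Julich-Brain v2.9 to v3.0 with the siibra 1.0a5 upgrade.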
assert conn_index.subject == '010'
# connectivity measures
for conn_measure in conn_measure_indices:
- assert conn_measure.parsed_shape == (294, 294)
+ assert conn_measure.parsed_shape == (314, 314)
assert conn_measure.subject == '010'
assert conn_measure.fk_connectivity_gid == conn_index.gid