Skip to content

Commit

Permalink
Maintenance
Browse files Browse the repository at this point in the history
  • Loading branch information
AhmetNSimsek committed Oct 16, 2023
1 parent cac216f commit 28eca27
Show file tree
Hide file tree
Showing 15 changed files with 241 additions and 143 deletions.
45 changes: 29 additions & 16 deletions e2e/features/activity_timeseries/test_activity_timeseries.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,40 +3,53 @@
from typing import List

from siibra.features.tabular.regional_timeseries_activity import RegionalBOLD
from siibra.features.feature import CompoundFeature
from e2e.util import check_duplicate

jba_29 = siibra.parcellations["julich 2.9"]


# Master list of every RegionalBOLD feature instance, collected once across
# all registered RegionalBOLD subclasses (reused by the uniqueness tests below
# to avoid repeating the expensive query).
all_bold_instances = [
    f
    for Cls in siibra.features.feature.Feature.SUBCLASSES[RegionalBOLD]
    for f in Cls.get_instances()
]


def test_id_unique():
    """Ids of all RegionalBOLD instances must be unique."""
    # use the precomputed master list instead of re-querying per test
    duplicates = check_duplicate([f.id for f in all_bold_instances])
    assert len(duplicates) == 0


def test_feature_unique():
    """RegionalBOLD feature objects themselves must be unique (no duplicates)."""
    # list() copies the master list; check_duplicate reports repeated objects
    duplicates = check_duplicate(list(all_bold_instances))
    assert len(duplicates) == 0


# querying bold features is a rather expensive operation;
# do it once at module level and reuse the results in the tests below
bold_cfs = [
    siibra.features.get(jba_29, "bold"),
    siibra.features.get(siibra.parcellations["julich 3"], "bold")
]


# getting data is a rather expensive operation
# only do once for the master list
def test_timeseries_get_table():
    """Fetch the signal table of every subject of each Julich-2.9 BOLD feature."""
    bold_features: List["RegionalBOLD"] = siibra.features.get(jba_29, "bold")
    assert len(bold_features) > 0
    for feature in bold_features:
        assert isinstance(feature, RegionalBOLD)
        subjects = feature.index
        assert len(subjects) > 0
        # every index entry is a subject id (string) before any table is fetched
        assert all(isinstance(s, str) for s in subjects)
        for subject in subjects:
            feature.get_table(subject)
def test_timeseries_get_data():
    """Access .data on each compound BOLD feature and on all of its elements."""
    assert len(bold_cfs) == 2
    for compound in bold_cfs:
        assert isinstance(compound, CompoundFeature)
        _ = compound.data  # aggregated data of the compound
        for element in compound:
            assert isinstance(element, RegionalBOLD)
            assert isinstance(element.subject, str)
            assert isinstance(element.index, tuple)
            _ = element.data  # per-element data table


# parametrization: query RegionalBOLD both by class-name string and by class
args = [(jba_29, "RegionalBOLD"), (jba_29, RegionalBOLD)]


@pytest.mark.parametrize("concept,query_arg", args)
def test_get_connectivity(concept, query_arg):
    """Querying RegionalBOLD by string or by class must yield compound features."""
    features: List["CompoundFeature"] = siibra.features.get(concept, query_arg)
    # plain string message (the old f-string had no placeholders)
    assert len(features) > 0, "Expecting some features exist, but none exist."
51 changes: 29 additions & 22 deletions e2e/features/connectivity/test_connectivity.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,45 +2,53 @@
import pytest
from typing import List
import sys
from siibra.features.feature import CompoundFeature
from siibra.features.connectivity.regional_connectivity import RegionalConnectivity
from e2e.util import check_duplicate
from zipfile import ZipFile

# NOTE: sys.platform is "win32" on Windows; it is never "ubuntu" (Linux reports
# "linux"), so the "ubuntu" variant of this skip condition could never fire and
# contradicted the Windows-specific reason text.
pytestmark = pytest.mark.skipif(sys.platform == "win32", reason="Fails due to memory limitation issues on Windows on Github actions. (Passes on local machines.)")

# Master list of every RegionalConnectivity instance, collected once across
# all registered subclasses (reused by uniqueness/copy/export tests below).
all_conn_instances = [
    f
    for Cls in siibra.features.feature.Feature.SUBCLASSES[RegionalConnectivity]
    for f in Cls.get_instances()
]

# compound (grouped) connectivity features for Julich-Brain 3
compound_conns = siibra.features.get(siibra.parcellations['julich 3'], RegionalConnectivity)


def test_id_unique():
    """Ids of all RegionalConnectivity instances must be unique."""
    duplicates = check_duplicate([f.id for f in all_conn_instances])
    assert len(duplicates) == 0


def test_feature_unique():
    """RegionalConnectivity feature objects themselves must be unique."""
    # list() copies the master list; check_duplicate reports repeated objects
    duplicates = check_duplicate(list(all_conn_instances))
    assert len(duplicates) == 0


@pytest.mark.parametrize("cf", compound_conns)
def test_connectivity_get_data(cf: CompoundFeature):
    """Check the averaged compound matrix and each element's matrix.

    Both the compound's .data (the average) and every element's .data must be
    square DataFrames whose row index matches the column order.
    """
    assert isinstance(cf, CompoundFeature)
    # generator instead of a materialized list inside all()
    assert all(isinstance(f, RegionalConnectivity) for f in cf)
    assert len(cf.indices) > 0
    matrix_df = cf.data  # get average
    assert all(matrix_df.index[i] == r for i, r in enumerate(matrix_df.columns))
    for f in cf:
        assert isinstance(f, RegionalConnectivity)
        matrix_idx_df = f.data
        assert all(matrix_idx_df.index[i] == r for i, r in enumerate(matrix_idx_df.columns))


# use the explicit "julich" specs so the lookup is unambiguous
jba_29 = siibra.parcellations["julich 2.9"]
jba_3 = siibra.parcellations["julich 3"]

args = [
(jba_29, "StreamlineCounts"),
(jba_29, "RegionalConnectivity"),
(jba_3, "RegionalConnectivity"),
(jba_3, "Anatomo"),
(jba_29, RegionalConnectivity),
(jba_29, "connectivity"),
(jba_29, siibra.features.connectivity.StreamlineCounts),
Expand All @@ -50,15 +58,15 @@ def test_connectivity_get_matrix(f: RegionalConnectivity):

@pytest.mark.parametrize("concept,query_arg", args)
def test_get_connectivity(concept, query_arg):
    """Each query spec must yield compound features of RegionalConnectivity."""
    features: List["CompoundFeature"] = siibra.features.get(concept, query_arg)
    assert len(features) > 0, "Expecting some features exist, but none exist."
    assert all(issubclass(cf.subfeature_type, RegionalConnectivity) for cf in features)


def test_copy_is_returned():
feat: RegionalConnectivity = features[0]

feat: RegionalConnectivity = all_conn_instances[0]
# retrieve matrix
matrix = feat.data(feat.subjects[0])
matrix = feat.data

# ensure new val to be put is different from prev val
prev_val = matrix.iloc[0, 0]
Expand All @@ -67,15 +75,14 @@ def test_copy_is_returned():
matrix.iloc[0, 0] = new_val

# retrieve matrix again
matrix_again = feat.data(feat.subjects[0])
matrix_again = feat.data
assert matrix_again.iloc[0, 0] == prev_val


def test_export():
    """Exported zip must contain one csv per subject (plus extras)."""
    # for now, only test the first feature, given the ci resource concern
    feat: RegionalConnectivity = all_conn_instances[0]
    feat.export("file.zip")
    # context manager closes the archive handle (the old code leaked it)
    with ZipFile("file.zip") as z:
        filenames = [info.filename for info in z.filelist]
    assert len(filenames) > len(feat.subjects) > 10
    assert len([f for f in filenames if f.endswith(".csv")]) > 10
47 changes: 39 additions & 8 deletions e2e/features/test_get.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,14 +16,42 @@ def test_get_instances(Cls: siibra.features.Feature):
"lq0::EbrainsDataFeature::p:minds/core/parcellationatlas/v1.0.0/94c1125b-b87e-45e4-901c-00daee7f2579-290::r:Area hOc1 (V1, 17, CalcS) left::https://nexus.humanbrainproject.org/v0/data/minds/core/dataset/v1.0.0/3ff328fa-f48f-474b-bd81-b5ee7ca230b6",
None,
),
pytest.param(
"lq0::BigBrainIntensityProfile::p:minds/core/parcellationatlas/v1.0.0/94c1125b-b87e-45e4-901c-00daee7f2579-290::r:Area hOc1 (V1, 17, CalcS)::f4380d69a9636d01398238b9ca602d29",
None,
marks=pytest.mark.xfail(
reason="BigBrainIntensityProfile ids are non deterministic... somehow..."
),
),
("e715e1f7-2079-45c4-a67f-f76b102acfce--2db407e630b9eaefa014a5a7fd506207", None),
(
"cf0::BigBrainIntensityProfile::p:minds/core/parcellationatlas/v1.0.0/94c1125b-b87e-45e4-901c-00daee7f2579-290::r:Area hOc1 (V1, 17, CalcS) left::nodsid::ff4271d32d8b6dd556e1ebaa91f09045",
None
), # CompoundFeature of 1579 BigBrainIntensityProfile features grouped by (Modified silver staining) anchored at Area hOc1 (V1, 17, CalcS) left with Set of 1579 points in the Bounding box from (-63.69,-59.94,-29.09) mm to (0.91,77.90,54.03)mm in BigBrain microscopic template (histology) space
(
"lq0::BigBrainIntensityProfile::p:minds/core/parcellationatlas/v1.0.0/94c1125b-b87e-45e4-901c-00daee7f2579-290::r:Area hOc1 (V1, 17, CalcS) left::303197094c5227c245bec8ff34191522",
None
), # BigBrainIntensityProfile queried with Area hOc1 (V1, 17, CalcS) left JBA3 and anchored at Point in BigBrain microscopic template (histology) [-1.587149977684021,69.70700073242188,6.023950099945068]
(
"b08a7dbc-7c75-4ce7-905b-690b2b1e8957--0b464eccb6e8afa4be9fc7a3c814e927",
None
), # MRIVolumeOfInterest 'Fiber structures of a human hippocampus based on joint DMRI, 3D-PLI, and TPFM acquisitions (T2)' in space 'BigBrain microscopic template (histology)
(
"cf0::CellDensityProfile::p:minds/core/parcellationatlas/v1.0.0/94c1125b-b87e-45e4-901c-00daee7f2579-290::r:Area hOc1 (V1, 17, CalcS) left::dc358cb8-2bbb-40f1-998c-356c9e13e4c6::cbc9f7824a81db1ba00deb53c84ec3f7",
None
), # CompoundFeature of 10 CellDensityProfile features grouped by (Segmented cell body density) anchored at Area hOc1 (V1, 17, CalcS) with Set of 10 points in the Bounding box from (-3.95,-65.80,-0.44) mm to (20.20,-42.70,9.71)mm in BigBrain microscopic template (histology) space
(
"dc358cb8-2bbb-40f1-998c-356c9e13e4c6--45a18a7f9c7610b65148136046689234",
None
), # CellDensityProfile (Segmented cell body density) anchored at Area hOc1 (V1, 17, CalcS) with Point in BigBrain microscopic template (histology) [13.53404426574707,-64.30000305175781,5.984400749206543]
(
"cf0::ReceptorDensityProfile::p:minds/core/parcellationatlas/v1.0.0/94c1125b-b87e-45e4-901c-00daee7f2579-290::r:Area hOc1 (V1, 17, CalcS) left::e715e1f7-2079-45c4-a67f-f76b102acfce::a264817171736834d75fffec45ba1757",
None
), # CompoundFeature of 16 ReceptorDensityProfile features grouped by (Receptor density) anchored at Area hOc1 (V1, 17, CalcS)
(
"e715e1f7-2079-45c4-a67f-f76b102acfce--402ff9f8032f5b39bdbd1a9a1c4fe1c0",
None
), # ReceptorDensityProfile (AMPA (alpha-amino-3hydroxy-5-methyl-4isoxazolepropionic acid receptor) density) anchored at Area hOc1 (V1, 17, CalcS) for
(
"cf0::StreamlineCounts::p:minds/core/parcellationatlas/v1.0.0/94c1125b-b87e-45e4-901c-00daee7f2579-300::0f1ccc4a-9a11-4697-b43f-9c9c8ac543e6::6c085751ff0a92d47f967428720e1fe9",
None
), # CompoundFeature of 200 StreamlineCounts features grouped by (StreamlineCounts, HCP) anchored at Julich-Brain Cytoarchitectonic Atlas (v3.0.3)
(
"0f1ccc4a-9a11-4697-b43f-9c9c8ac543e6--5c040dd84fe23933624732e264d3d137",
None
) # StreamlineCounts (StreamlineCounts) anchored at minds/core/parcellationatlas/v1.0.0/94c1125b-b87e-45e4-901c-00daee7f2579-300 with cohort HCP - 005
]


Expand All @@ -33,6 +61,9 @@ def test_get_instance(fid, foo):
assert feat


# this tests whether or not calling a live query caused proxy feature to be
# added to subclasses. (It should not: causes memory leak and also increases
# query time linearly)
@pytest.mark.parametrize("fid,foo", ids)
def test_subclass_count(fid, foo):
len_before = len(siibra.features.Feature.SUBCLASSES[siibra.features.Feature])
Expand Down
2 changes: 1 addition & 1 deletion examples/03_data_features/001_receptor_densities.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@
v1_profiles = siibra.features.get(
siibra.get_region('julich 2.9', 'v1'),
siibra.features.molecular.ReceptorDensityProfile
)
)[0]
for p in v1_profiles:
print(p.receptor)
if "GABAA" in p.receptor:
Expand Down
22 changes: 11 additions & 11 deletions examples/03_data_features/006_connectivity_matrices.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,13 +45,13 @@
# Typically, connectivity features provide a range of region-to-region
# connectivity matrices for different subjects from an imaging cohort.
print("Connectivity features are compounded by the modality and cohort.")
for f in features:
print(f.name)
for cf in features:
print(cf.name)
# let us select the HCP cohort
if f.filter_attributes['cohort'] == "HCP":
conn = f
if "HCP" in cf.compounding_attritbutes:
conn = cf

print("\n" + conn.description)
print(f"Selected: {conn.name}'\n'" + conn.description)

# %%
# The connectivity matrices are provided as pandas DataFrames,
Expand All @@ -62,25 +62,25 @@
# %%
# Subjects are encoded via anonymized ids
print(conn.indices)
subject = conn.indices[0] # let us select the first subject
index = conn.indices[0] # let us select the first subject

# %%
# we can access to corresponding matrix via
matrix = conn[subject].data
matrix = conn[index].data
matrix.iloc[0:15, 0:15] # let us see the first 15x15

# %%
# The matrix can be displayed using `plot` method. Also, it can be
# displayed only for a specific list of regions.
selected_regions = conn.regions[0:30]
conn[subject].plot(regions=selected_regions, reorder=True, cmap="magma")
selected_regions = conn[index].regions[0:30]
conn[index].plot(regions=selected_regions, reorder=True, cmap="magma")

# %%
# We can create a 3D visualization of the connectivity using
# the plotting module of `nilearn <https://nilearn.github.io>`_.
# To do so, we need to provide centroids in
# the anatomical space for each region (or "node") of the connectivity matrix.
node_coords = conn.compute_centroids('mni152')
node_coords = conn[index].compute_centroids('mni152')


# %%
Expand All @@ -92,7 +92,7 @@
node_size=10,
)
view.title(
f"{conn.modality} of subject {subject} in {conn[subject].cohort} cohort "
f"{conn.modality} of subject {index} in {conn[index].cohort} cohort "
f"averaged on {jubrain.name}",
size=10,
)
Expand Down
6 changes: 5 additions & 1 deletion examples/03_data_features/007_comparative_assessment.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,8 +66,10 @@
# showing density distributions from the pial surface to the gray/white matter
# boundary in individual tissue samples. For the receptor measurements, we
# supply now an additional filter to choose only GABAB profiles.
# (ReceptorDensityProfile CompoundFeature is indexed by receptors. List by
# `indices` property.)
modalities = [
(siibra.features.molecular.ReceptorDensityProfile, lambda p: "gabab" in p.receptor.lower()),
(siibra.features.molecular.ReceptorDensityProfile, lambda p: p['GABAB (gamma-aminobutyric acid receptor type B)']),
(siibra.features.cellular.CellDensityProfile, lambda p: True),
(siibra.features.cellular.BigBrainIntensityProfile, lambda p: True),
]
Expand All @@ -86,3 +88,5 @@
p.plot(ax=axs[j, i], layercolor="darkblue")
axs[j, i].set_ylim(0, ymax[j])
f.tight_layout()

# %%
16 changes: 8 additions & 8 deletions examples/03_data_features/008_functional_timeseries.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,20 +37,20 @@
features = siibra.features.get(jubrain, siibra.features.functional.RegionalBOLD)
bold = features[0]
print(f"Found {len(bold)} parcellation-based BOLD signals for {jubrain}.")
print(f"RegionalBOLD features reflects {bold.modality} of {bold.filter_attributes['cohort']} cohort.")
print(bold.name)
print("\n" + bold.description)

# Subjects are encoded via anonymized ids:
# Subjects are encoded via anonymized ids and the CompoundFeature is indexed by
# subject id and paradigm tuples.
print(bold.indices)


# %%
# The parcellation-based functional data are provided as pandas DataFrames
# with region objects as columns and indices as time step.
subject = bold.indices[0]
table = bold[subject].get_table()
print(f"Timestep: {bold[subject].timestep}")
# with region objects as columns and indices as time step. The index is of
# the table is a Timeseries.
index = bold.indices[0]
table = bold[index].data
table[jubrain.get_region("hOc3v left")]

# %%
Expand All @@ -63,7 +63,7 @@
'Area 7A (SPL) left', 'Area 7A (SPL) right', 'CA1 (Hippocampus) left',
'CA1 (Hippocampus) right', 'CA1 (Hippocampus) left', 'CA1 (Hippocampus) right'
]
bold[subject].plot_carpet(regions=selected_regions)
bold[index].plot_carpet(regions=selected_regions)
# %%
# Alternatively, we can visualize the mean signal strength per region:
bold[subject].plot(regions=selected_regions)
bold[index].plot(regions=selected_regions)
6 changes: 2 additions & 4 deletions siibra/configuration/factory.py
Original file line number Diff line number Diff line change
Expand Up @@ -483,18 +483,16 @@ def build_activity_timeseries(cls, spec):
"anchor": cls.extract_anchor(spec),
"description": spec.get("description", ""),
"datasets": cls.extract_datasets(spec),
"timestep": spec.get("timestep", ("1 no_unit"))
"timestep": spec.get("timestep", ("1 timestep"))
}
paradigm = spec.get("paradigm")
if paradigm:
kwargs["paradigm"] = paradigm
files_indexed_by_subjects = spec.get("files_indexed_by", "subjects") == "subjects"
timeseries_by_file = []
for fkey, filename in files.items():
kwargs.update({
"files": {fkey: filename},
"subject": fkey if files_indexed_by_subjects else "average",
"feature": None if files_indexed_by_subjects else fkey
"subject": fkey
})
timeseries_by_file.append(timeseries_cls(**kwargs))
return timeseries_by_file
Expand Down
Loading

0 comments on commit 28eca27

Please sign in to comment.