Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
17 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
47 changes: 47 additions & 0 deletions src/spikeinterface/core/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -213,6 +213,14 @@ def id_to_index(self, id) -> int:
return ind

def annotate(self, **new_annotations) -> None:
    """Add one or more annotations to this object.

    Parameters
    ----------
    **new_annotations : dict
        Key-value pairs of annotations to add. An already-existing
        annotation key is silently overwritten with the new value.
    """
    # Equivalent to dict.update(): insert each pair, replacing on key collision.
    for key, value in new_annotations.items():
        self._annotations[key] = value

def set_annotation(self, annotation_key: str, value: Any, overwrite=False) -> None:
Expand All @@ -236,6 +244,24 @@ def set_annotation(self, annotation_key: str, value: Any, overwrite=False) -> No
else:
raise ValueError(f"{annotation_key} is already an annotation key. Use 'overwrite=True' to overwrite it")

def delete_annotation(self, annotation_key: str) -> None:
    """Delete an existing annotation.

    Parameters
    ----------
    annotation_key : str
        The annotation key to delete

    Raises
    ------
    ValueError
        If the annotation key does not exist
    """
    # Guard clause; membership test on the dict directly instead of `.keys()`.
    if annotation_key not in self._annotations:
        raise ValueError(f"{annotation_key} is not an annotation key")
    del self._annotations[annotation_key]

def get_preferred_mp_context(self):
"""
Get the preferred context for multiprocessing.
Expand Down Expand Up @@ -434,6 +460,15 @@ def copy_metadata(
if self._preferred_mp_context is not None:
other._preferred_mp_context = self._preferred_mp_context

if not only_main:
self._extra_metadata_copy(other)

def _extra_metadata_copy(self, other: BaseExtractor):
    """Hook for subclasses: copy extra metadata not stored in the
    annotations/properties dicts onto ``other``.

    The base implementation intentionally does nothing.
    """
    return None

def to_dict(
self,
include_annotations: bool = False,
Expand Down Expand Up @@ -567,6 +602,8 @@ def to_dict(
folder_metadata = Path(folder_metadata).resolve().absolute().relative_to(relative_to)
dump_dict["folder_metadata"] = str(folder_metadata)

self._extra_metadata_to_dict(dump_dict)

return dump_dict

@staticmethod
Expand Down Expand Up @@ -855,6 +892,14 @@ def _extra_metadata_to_folder(self, folder):
# This implemented in BaseRecording for probe
pass

def _extra_metadata_from_dict(self, dump_dict):
# This implemented in BaseRecording for probe
pass

def _extra_metadata_to_dict(self, dump_dict):
# This implemented in BaseRecording for probe
pass

def save(self, **kwargs) -> BaseExtractor:
"""
Save a SpikeInterface object.
Expand Down Expand Up @@ -1154,6 +1199,8 @@ def _load_extractor_from_dict(dic) -> BaseExtractor:
for k, v in dic["properties"].items():
extractor.set_property(k, v)

extractor._extra_metadata_from_dict(dic)

return extractor


Expand Down
18 changes: 8 additions & 10 deletions src/spikeinterface/core/baserecording.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@ class BaseRecording(BaseRecordingSnippets):
_main_annotations = BaseRecordingSnippets._main_annotations + ["is_filtered"]
_main_properties = [
"group",
"location",
"gain_to_uV",
"offset_to_uV",
"gain_to_physical_unit",
Expand Down Expand Up @@ -591,15 +590,16 @@ def _save(self, format="binary", verbose: bool = False, **save_kwargs):
kwargs, job_kwargs = split_job_kwargs(save_kwargs)

if format == "binary":
from .binaryfolder import BinaryFolderRecording
from .binaryrecordingextractor import BinaryRecordingExtractor

folder = kwargs["folder"]
file_paths = [folder / f"traces_cached_seg{i}.raw" for i in range(self.get_num_segments())]
dtype = kwargs.get("dtype", None) or self.get_dtype()
t_starts = self._get_t_starts()

write_binary_recording(self, file_paths=file_paths, dtype=dtype, verbose=verbose, **job_kwargs)

from .binaryrecordingextractor import BinaryRecordingExtractor

# This is created so it can be saved as json because the `BinaryFolderRecording` requires it loading
# See the __init__ of `BinaryFolderRecording`
binary_rec = BinaryRecordingExtractor(
Expand All @@ -616,8 +616,9 @@ def _save(self, format="binary", verbose: bool = False, **save_kwargs):
offset_to_uV=self.get_channel_offsets(),
)
binary_rec.dump(folder / "binary.json", relative_to=folder)

from .binaryfolder import BinaryFolderRecording
if self.has_probe():
probegroup = self.get_probegroup()
write_probeinterface(folder / "probe.json", probegroup)

cached = BinaryFolderRecording(folder_path=folder)

Expand Down Expand Up @@ -648,10 +649,7 @@ def _save(self, format="binary", verbose: bool = False, **save_kwargs):
else:
raise ValueError(f"format {format} not supported")

if self.get_property("contact_vector") is not None:
probegroup = self.get_probegroup()
cached.set_probegroup(probegroup)

# TODO: write binary should save timestamps too
for segment_index in range(self.get_num_segments()):
if self.has_time_vector(segment_index):
# the use of get_times is preferred since timestamps are converted to array
Expand All @@ -676,7 +674,7 @@ def _extra_metadata_from_folder(self, folder):

def _extra_metadata_to_folder(self, folder):
# save probe
if self.get_property("contact_vector") is not None:
if self.has_probe():
probegroup = self.get_probegroup()
write_probeinterface(folder / "probe.json", probegroup)

Expand Down
Loading
Loading