tractolearn.tractoio package#

Subpackages#

Submodules#

tractolearn.tractoio.dataset_fetch module#

class tractolearn.tractoio.dataset_fetch.Dataset(value)#

Bases: Enum

Datasets for tractography learning.

BUNDLE_LABEL_CONFIG = 'bundle_label_config'#
CONTRASTIVE_AUTOENCODER_WEIGHTS = 'contrastive_ae_weights'#
GENERATIVE_LOA_CONE_CONFIG = 'generative_loa_cone_config'#
GENERATIVE_SEED_STRML_RATIO_CONFIG = 'generative_seed_streamline_ratio_config'#
GENERATIVE_STRML_MAX_COUNT_CONFIG = 'generative_streamline_max_count_config'#
GENERATIVE_STRML_RQ_COUNT_CONFIG = 'generative_streamline_req_count_config'#
GENERATIVE_WM_TISSUE_CRITERION_CONFIG = 'generative_wm_tisue_criterion_config'#
MNI2009CNONLINSYMM_ANAT = 'mni2009cnonlinsymm_anat'#
RECOBUNDLESX_ATLAS = 'recobundlesx_atlas'#
RECOBUNDLESX_CONFIG = 'recobundlesx_config'#
TRACTOINFERNO_HCP_CONTRASTIVE_THR_CONFIG = 'tractoinferno_hcp_contrastive_thr_config'#
TRACTOINFERNO_HCP_REF_TRACTOGRAPHY = 'tractoinferno_hcp_ref_tractography'#
static argparse(s)#
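
A minimal usage sketch of the enum. The assumption here is that each member's string value is the dataset name accepted elsewhere in this module (e.g. by retrieve_dataset below).

>>> from tractolearn.tractoio.dataset_fetch import Dataset
>>> # Each member wraps the string identifier of a downloadable dataset
>>> Dataset.RECOBUNDLESX_ATLAS.value
'recobundlesx_atlas'
>>> # Iterate over all available dataset names
>>> names = [member.value for member in Dataset]
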
exception tractolearn.tractoio.dataset_fetch.DatasetError#

Bases: Exception

exception tractolearn.tractoio.dataset_fetch.FetcherError#

Bases: Exception

tractolearn.tractoio.dataset_fetch.check_hash(filename, stored_hash=None)#

Check that the hash of the given filename equals the stored one.

Parameters:
  • filename (str) – The path to the file whose hash is to be compared.

  • stored_hash (str, optional) – The expected hash against which the generated hash is verified. Default: None, in which case the check is skipped.

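A short sketch of the hash check. The file path and hash below are hypothetical placeholders; the behaviour on a mismatch (raising FetcherError) is an assumption based on fetch_data below.

>>> from tractolearn.tractoio.dataset_fetch import check_hash
>>> # Hypothetical file and expected hash; a mismatch presumably raises FetcherError
>>> check_hash("/tmp/tractolearn_data/atlas.zip",
...            stored_hash="0123456789abcdef0123456789abcdef")
>>> # With stored_hash=None (the default) the check is skipped
>>> check_hash("/tmp/tractolearn_data/atlas.zip")
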
tractolearn.tractoio.dataset_fetch.copyfileobj_withprogress(fsrc, fdst, total_length, length=16384)#
tractolearn.tractoio.dataset_fetch.fetch_data(files, folder, data_size=None)#

Download files to folder and check their hashes.

Parameters:
  • files (dictionary) – For each file in files, the value should be (url, hash). The file will be downloaded from url if it does not already exist, or if it exists but its hash does not match.

  • folder (str) – The directory where the files are saved; it will be created if it does not already exist.

  • data_size (str, optional) – A string describing the size of the data (e.g. “91 MB”) to be logged to the screen. Default: None, in which case no information about the data size is logged.

Raises:

FetcherError – Raised if the hash of the downloaded file does not match the expected value. The downloaded file is not deleted when this error is raised.

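A usage sketch relying only on the documented behaviour (download, hash check, FetcherError on mismatch); the URL, hash, and destination folder below are hypothetical placeholders.

>>> from tractolearn.tractoio.dataset_fetch import fetch_data
>>> # Each entry maps a local filename to its (url, hash) pair
>>> files = {
...     "example_atlas.zip": (
...         "https://example.org/data/example_atlas.zip",
...         "0123456789abcdef0123456789abcdef",
...     ),
... }
>>> # Downloads into the folder (created if missing), verifies the hashes,
>>> # and raises FetcherError if a hash does not match
>>> fetch_data(files, "/tmp/tractolearn_data", data_size="91 MB")
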
tractolearn.tractoio.dataset_fetch.retrieve_dataset(name, path)#

Retrieve the given dataset to the provided path.

Parameters:
  • name (string) – Dataset name.

  • path (string) – Destination path.

Returns:

fnames – Filenames for dataset.

Return type:

string or list
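
A minimal sketch; the destination directory is hypothetical, and passing the enum member's string value as name is an assumption based on the Dataset enum above.

>>> from tractolearn.tractoio.dataset_fetch import Dataset, retrieve_dataset
>>> # Fetch the RecobundlesX atlas into a hypothetical local directory;
>>> # the return value holds the filename(s) of the retrieved data
>>> fnames = retrieve_dataset(Dataset.RECOBUNDLESX_ATLAS.value,
...                           "/tmp/tractolearn_data")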

tractolearn.tractoio.utils module#

tractolearn.tractoio.utils.load_bundles_dict(dataset_name)#

Load the bundle dictionary containing the bundle names and classes corresponding to a dataset.

Parameters:

dataset_name (str) – Dataset name whose bundle names and classes are to be fetched. Supported datasets are listed in anatomy.bundles_dictionaries.json.

Returns:

Bundle names and corresponding classes if the dictionary is defined for the dataset; None otherwise.

Return type:

dict or None
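
A sketch with a hypothetical dataset name; the supported names are those listed in anatomy.bundles_dictionaries.json.

>>> from tractolearn.tractoio.utils import load_bundles_dict
>>> # "tractoinferno" is a hypothetical dataset name used for illustration
>>> bundles = load_bundles_dict("tractoinferno")
>>> # None is returned when no dictionary is defined for the dataset
>>> if bundles is not None:
...     for bundle_name, bundle_class in bundles.items():
...         print(bundle_name, bundle_class)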

tractolearn.tractoio.utils.load_data2(fname, ref_anat_fname, streamline_class_name, random_flip=True, random_flip_ratio=0.3)#
tractolearn.tractoio.utils.load_process_streamlines2(fname, ref_anat_fname, streamline_class_name, random_flip=True, random_flip_ratio=0.3)#
tractolearn.tractoio.utils.load_ref_anat_image(ref_anat_fname)#
tractolearn.tractoio.utils.load_streamline_learning_data(fname, ref_anat_fname, anatomy, random_flip=False)#
tractolearn.tractoio.utils.load_streamlines(fname: str, ref_anat_fname: str, streamlines_class: int, resample: bool = False, num_points: int = 256, flip_all_streamlines: bool = False) → Tuple[array, array]#
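
A usage sketch based only on the signature above; the file names are hypothetical, and reading the two returned arrays as streamline data and class labels is an assumption.

>>> from tractolearn.tractoio.utils import load_streamlines
>>> # Hypothetical tractogram and reference anatomy; streamlines are
>>> # resampled to 256 points before being returned
>>> data, classes = load_streamlines(
...     "bundle.trk",
...     "t1.nii.gz",
...     streamlines_class=0,
...     resample=True,
...     num_points=256,
... )
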
tractolearn.tractoio.utils.read_data_from_json_file(fname)#
tractolearn.tractoio.utils.read_data_from_pickle_file(fname)#
tractolearn.tractoio.utils.save_data_to_json_file(data, fname)#
tractolearn.tractoio.utils.save_data_to_pickle_file(data, fname)#
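
A sketch of the JSON helpers (the pickle helpers mirror the same pattern); the file name and dictionary contents are hypothetical.

>>> from tractolearn.tractoio.utils import (
...     read_data_from_json_file,
...     save_data_to_json_file,
... )
>>> # Hypothetical configuration dictionary written to and read back from disk
>>> config = {"latent_space_dims": 32, "num_points": 256}
>>> save_data_to_json_file(config, "config.json")
>>> loaded = read_data_from_json_file("config.json")
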
tractolearn.tractoio.utils.save_loss_history(loss_history, fname)#
tractolearn.tractoio.utils.save_streamlines(streamlines, ref_anat_fname, tractogram_fname, data_per_streamline: dict | None = None)#
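
A sketch assuming that a plain sequence of (N, 3) point arrays is accepted as streamlines and that the reference anatomy defines the output space; both file names are hypothetical.

>>> import numpy as np
>>> from tractolearn.tractoio.utils import save_streamlines
>>> # One hypothetical streamline with 100 three-dimensional points,
>>> # written to a tractogram referenced to an anatomical image
>>> streamlines = [np.random.rand(100, 3).astype(np.float32) * 100]
>>> save_streamlines(streamlines, "t1.nii.gz", "example.trk")
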
tractolearn.tractoio.utils.write_bundles(anat_ref_fname, class_lookup, streamlines, predicted_classes, path)#

Module contents#