From 030dfcd631edd00fc0b07814b50708da2f643549 Mon Sep 17 00:00:00 2001 From: VincentBeaud Date: Tue, 26 Nov 2024 10:15:51 -0500 Subject: [PATCH 01/44] Very basic version - no test --- scripts/scil_tracking_local.py | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/scripts/scil_tracking_local.py b/scripts/scil_tracking_local.py index d9c32ded9..43d567ab7 100755 --- a/scripts/scil_tracking_local.py +++ b/scripts/scil_tracking_local.py @@ -73,7 +73,7 @@ from scilpy.io.utils import (add_sphere_arg, add_verbose_arg, assert_headers_compatible, assert_inputs_exist, assert_outputs_exist, parse_sh_basis_arg, - verify_compression_th) + verify_compression_th, load_matrix_in_any_format) from scilpy.tracking.tracker import GPUTacker from scilpy.tracking.utils import (add_mandatory_options_tracking, add_out_options, add_seeding_options, @@ -100,6 +100,10 @@ def _build_arg_parser(): track_g = add_tracking_options(p) add_seeding_options(p) + p.add_argument('--in_custom_seeds', type=str, + help='Path to a file containing a list of custom seeding \n' + 'coordinates. (.txt, .mat or .npy)') + # Other options, only available in this script: track_g.add_argument('--sh_to_pmf', action='store_true', help='If set, map sherical harmonics to spherical ' @@ -222,12 +226,15 @@ def main(): # Note. Seeds are in voxel world, center origin. # (See the examples in random_seeds_from_mask). logging.info("Preparing seeds.") - seeds = track_utils.random_seeds_from_mask( - seed_img.get_fdata(dtype=np.float32), - np.eye(4), - seeds_count=nb_seeds, - seed_count_per_voxel=seed_per_vox, - random_seed=args.seed) + if args.in_custom_seeds: + seeds = np.squeeze(load_matrix_in_any_format(args.in_custom_seeds)) + else: + seeds = track_utils.random_seeds_from_mask( + seed_img.get_fdata(dtype=np.float32), + np.eye(4), + seeds_count=nb_seeds, + seed_count_per_voxel=seed_per_vox, + random_seed=args.seed) total_nb_seeds = len(seeds) if not args.use_gpu: From 1b9e78752dcf19d0af19cb14e33e0a9f701a477e Mon Sep 17 00:00:00 2001 From: VincentBeaud Date: Thu, 28 Nov 2024 07:41:35 -0500 Subject: [PATCH 02/44] custom seeds to scil_tracking_local_dev --- scilpy/tracking/seed.py | 69 ++++++++++++++++++++++++++++++ scripts/scil_tracking_local_dev.py | 45 +++++++++++-------- 2 files changed, 97 insertions(+), 17 deletions(-) diff --git a/scilpy/tracking/seed.py b/scilpy/tracking/seed.py index 3ba27697f..38836aa63 100644 --- a/scilpy/tracking/seed.py +++ b/scilpy/tracking/seed.py @@ -364,3 +364,72 @@ def get_next_n_pos(self, random_generator, shuffled_indices, random_generator) return seeds + + +class CustomSeedsGenerator(SeedGenerator): + """ + Adaptation of the scilpy.tracking.seed.SeedGenerator interface for + using already generated, custom seeds. + """ + def __init__(self, custom_seeds, voxres, origin, space): + """ + Parameters + ---------- + custom_seeds: list + Custom seeding coordinates. + """ + self.voxres = voxres + self.origin = origin + self.space = space + self.seeds = custom_seeds + self.count = 0 + + def init_generator(self, rng_seed, numbers_to_skip): + """ + Initialize a numpy number generator according to user's parameters. + Returns also the shuffled index of all fibertubes. 
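+        (For this class, the returned index list is always empty: custom
+        seeds are simply dispensed in the order they were provided.)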
+ + The values are not stored in this classed, but are returned to the + user, who should add them as arguments in the methods + self.get_next_pos() + self.get_next_n_pos() + The use of this is that with multiprocessing, each process may have its + own generator, with less risk of using the wrong one when they are + managed by the user. + + Parameters + ---------- + rng_seed : int + The "seed" for the random generator. + numbers_to_skip : int + Number of seeds (i.e. voxels) to skip. Useful if you want to + continue sampling from the same generator as in a first experiment + (with a fixed rng_seed). + + Return + ------ + random_generator : numpy random generator + Initialized numpy number generator. + indices : ndarray + Shuffled indices of current seeding map, shuffled with current + generator. + """ + self.i = numbers_to_skip + + return np.random.default_rng(rng_seed), [] + + def get_next_pos(self, random_generator: np.random.Generator, + shuffled_indices, which_seed): + seed = self.seeds[self.i] + self.i += 1 + + print(seed) + + return seed[0], seed[1], seed[2] + + def get_next_n_pos(self, random_generator, shuffled_indices, + which_seed_start, n): + seeds = self.seeds[self.i:self.i+n] + self.i += n + + return seeds \ No newline at end of file diff --git a/scripts/scil_tracking_local_dev.py b/scripts/scil_tracking_local_dev.py index 82d4d1e4d..d0ebf573a 100755 --- a/scripts/scil_tracking_local_dev.py +++ b/scripts/scil_tracking_local_dev.py @@ -61,10 +61,11 @@ from scilpy.io.utils import (add_processes_arg, add_sphere_arg, add_verbose_arg, assert_inputs_exist, assert_outputs_exist, - parse_sh_basis_arg, verify_compression_th) + parse_sh_basis_arg, verify_compression_th, + load_matrix_in_any_format) from scilpy.image.volume_space_management import DataVolume from scilpy.tracking.propagator import ODFPropagator -from scilpy.tracking.seed import SeedGenerator +from scilpy.tracking.seed import SeedGenerator, CustomSeedsGenerator from scilpy.tracking.tracker import Tracker from scilpy.tracking.utils import (add_mandatory_options_tracking, add_out_options, add_seeding_options, @@ -84,6 +85,10 @@ def _build_arg_parser(): track_g = add_tracking_options(p) add_seeding_options(p) + p.add_argument('--in_custom_seeds', type=str, + help='Path to a file containing a list of custom seeding \n' + 'coordinates. (.txt, .mat or .npy)') + # Options only for here. track_g.add_argument('--algo', default='prob', choices=['det', 'prob'], help='Algorithm to use. [%(default)s]') @@ -191,7 +196,7 @@ def main(): # If save_seeds, space and origin must be vox, center. Choosing those # values. our_space = Space.VOX - our_origin = Origin('center') + our_origin = Origin('corner') # Preparing everything logging.info("Loading seeding mask.") @@ -203,21 +208,27 @@ def main(): 'seeding mask.'.format(args.in_seed)) seed_res = seed_img.header.get_zooms()[:3] - seed_generator = SeedGenerator(seed_data, seed_res, - space=our_space, origin=our_origin, - n_repeats=args.n_repeats_per_seed) - if args.npv: - # toDo. This will not really produce n seeds per voxel, only true - # in average. - nbr_seeds = len(seed_generator.seeds_vox_corner) * args.npv - elif args.nt: - nbr_seeds = args.nt + if args.in_custom_seeds: + seeds = np.squeeze(load_matrix_in_any_format(args.in_custom_seeds)) + seed_generator = CustomSeedsGenerator(seeds, seed_res, space=our_space, origin=our_origin) + nbr_seeds = len(seeds) else: - # Setting npv = 1. 
- nbr_seeds = len(seed_generator.seeds_vox_corner) - if len(seed_generator.seeds_vox_corner) == 0: - parser.error('Seed mask "{}" does not have any voxel with value > 0.' - .format(args.in_seed)) + seed_generator = SeedGenerator(seed_data, seed_res, + space=our_space, origin=our_origin, + n_repeats=args.n_repeats_per_seed) + + if args.npv: + # toDo. This will not really produce n seeds per voxel, only true + # in average. + nbr_seeds = len(seed_generator.seeds_vox_corner) * args.npv + elif args.nt: + nbr_seeds = args.nt + else: + # Setting npv = 1. + nbr_seeds = len(seed_generator.seeds_vox_corner) + if len(seed_generator.seeds_vox_corner) == 0: + parser.error('Seed mask "{}" does not have any voxel with value > 0.' + .format(args.in_seed)) logging.info("Loading tracking mask.") mask_img = nib.load(args.in_mask) From e438508638d749268d308050f7cfcd3e57a79b1c Mon Sep 17 00:00:00 2001 From: VincentBeaud Date: Thu, 28 Nov 2024 09:07:42 -0500 Subject: [PATCH 03/44] custom seeds for scil_tracking_local_dev.py --- scilpy/tracking/seed.py | 69 ++++++++++++++++++++++++++++++ scripts/scil_tracking_local_dev.py | 43 ++++++++++++------- 2 files changed, 96 insertions(+), 16 deletions(-) diff --git a/scilpy/tracking/seed.py b/scilpy/tracking/seed.py index 3ba27697f..cef0ec6a4 100644 --- a/scilpy/tracking/seed.py +++ b/scilpy/tracking/seed.py @@ -364,3 +364,72 @@ def get_next_n_pos(self, random_generator, shuffled_indices, random_generator) return seeds + + +class CustomSeedsGenerator(SeedGenerator): + """ + Adaptation of the scilpy.tracking.seed.SeedGenerator interface for + using already generated, custom seeds. + """ + def __init__(self, custom_seeds, voxres, origin, space): + """ + Parameters + ---------- + custom_seeds: list + Custom seeding coordinates. + """ + self.voxres = voxres + self.origin = origin + self.space = space + self.seeds = custom_seeds + self.count = 0 + + def init_generator(self, rng_seed, numbers_to_skip): + """ + Initialize a numpy number generator according to user's parameters. + Returns also the shuffled index of all fibertubes. + + The values are not stored in this classed, but are returned to the + user, who should add them as arguments in the methods + self.get_next_pos() + self.get_next_n_pos() + The use of this is that with multiprocessing, each process may have its + own generator, with less risk of using the wrong one when they are + managed by the user. + + Parameters + ---------- + rng_seed : int + The "seed" for the random generator. + numbers_to_skip : int + Number of seeds (i.e. voxels) to skip. Useful if you want to + continue sampling from the same generator as in a first experiment + (with a fixed rng_seed). + + Return + ------ + random_generator : numpy random generator + Initialized numpy number generator. + indices : ndarray + Shuffled indices of current seeding map, shuffled with current + generator. 
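+
+        Example (sketch of the intended call pattern; names illustrative):
+        >>> rng, indices = seed_gen.init_generator(rng_seed=0,
+        ...                                        numbers_to_skip=0)
+        >>> x, y, z = seed_gen.get_next_pos(rng, indices, 0)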
+ """ + self.i = numbers_to_skip + + return np.random.default_rng(rng_seed), [] + + def get_next_pos(self, random_generator: np.random.Generator, + shuffled_indices, which_seed): + seed = self.seeds[self.i] + self.i += 1 + + print(seed) + + return seed[0], seed[1], seed[2] + + def get_next_n_pos(self, random_generator, shuffled_indices, + which_seed_start, n): + seeds = self.seeds[self.i:self.i+n] + self.i += n + + return seeds diff --git a/scripts/scil_tracking_local_dev.py b/scripts/scil_tracking_local_dev.py index 82d4d1e4d..fd5b219fe 100755 --- a/scripts/scil_tracking_local_dev.py +++ b/scripts/scil_tracking_local_dev.py @@ -61,10 +61,11 @@ from scilpy.io.utils import (add_processes_arg, add_sphere_arg, add_verbose_arg, assert_inputs_exist, assert_outputs_exist, - parse_sh_basis_arg, verify_compression_th) + parse_sh_basis_arg, verify_compression_th, + load_matrix_in_any_format) from scilpy.image.volume_space_management import DataVolume from scilpy.tracking.propagator import ODFPropagator -from scilpy.tracking.seed import SeedGenerator +from scilpy.tracking.seed import SeedGenerator, CustomSeedsGenerator from scilpy.tracking.tracker import Tracker from scilpy.tracking.utils import (add_mandatory_options_tracking, add_out_options, add_seeding_options, @@ -84,6 +85,10 @@ def _build_arg_parser(): track_g = add_tracking_options(p) add_seeding_options(p) + p.add_argument('--in_custom_seeds', type=str, + help='Path to a file containing a list of custom seeding \n' + 'coordinates. (.txt, .mat or .npy)') + # Options only for here. track_g.add_argument('--algo', default='prob', choices=['det', 'prob'], help='Algorithm to use. [%(default)s]') @@ -203,21 +208,27 @@ def main(): 'seeding mask.'.format(args.in_seed)) seed_res = seed_img.header.get_zooms()[:3] - seed_generator = SeedGenerator(seed_data, seed_res, - space=our_space, origin=our_origin, - n_repeats=args.n_repeats_per_seed) - if args.npv: - # toDo. This will not really produce n seeds per voxel, only true - # in average. - nbr_seeds = len(seed_generator.seeds_vox_corner) * args.npv - elif args.nt: - nbr_seeds = args.nt + if args.in_custom_seeds: + seeds = np.squeeze(load_matrix_in_any_format(args.in_custom_seeds)) + seed_generator = CustomSeedsGenerator(seeds, seed_res, space=our_space, origin=our_origin) + nbr_seeds = len(seeds) else: - # Setting npv = 1. - nbr_seeds = len(seed_generator.seeds_vox_corner) - if len(seed_generator.seeds_vox_corner) == 0: - parser.error('Seed mask "{}" does not have any voxel with value > 0.' - .format(args.in_seed)) + seed_generator = SeedGenerator(seed_data, seed_res, + space=our_space, origin=our_origin, + n_repeats=args.n_repeats_per_seed) + + if args.npv: + # toDo. This will not really produce n seeds per voxel, only true + # in average. + nbr_seeds = len(seed_generator.seeds_vox_corner) * args.npv + elif args.nt: + nbr_seeds = args.nt + else: + # Setting npv = 1. + nbr_seeds = len(seed_generator.seeds_vox_corner) + if len(seed_generator.seeds_vox_corner) == 0: + parser.error('Seed mask "{}" does not have any voxel with value > 0.' 
+ .format(args.in_seed)) logging.info("Loading tracking mask.") mask_img = nib.load(args.in_mask) From faba220e73dbd1f39910dd411f0236b7b2a398f3 Mon Sep 17 00:00:00 2001 From: VincentBeaud Date: Thu, 28 Nov 2024 09:12:53 -0500 Subject: [PATCH 04/44] pep8 and tests --- scripts/scil_tracking_local_dev.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/scripts/scil_tracking_local_dev.py b/scripts/scil_tracking_local_dev.py index d0ebf573a..b9a74f701 100755 --- a/scripts/scil_tracking_local_dev.py +++ b/scripts/scil_tracking_local_dev.py @@ -210,13 +210,14 @@ def main(): seed_res = seed_img.header.get_zooms()[:3] if args.in_custom_seeds: seeds = np.squeeze(load_matrix_in_any_format(args.in_custom_seeds)) - seed_generator = CustomSeedsGenerator(seeds, seed_res, space=our_space, origin=our_origin) + seed_generator = CustomSeedsGenerator(seeds, seed_res, space=our_space, + origin=our_origin) nbr_seeds = len(seeds) else: seed_generator = SeedGenerator(seed_data, seed_res, space=our_space, origin=our_origin, n_repeats=args.n_repeats_per_seed) - + if args.npv: # toDo. This will not really produce n seeds per voxel, only true # in average. @@ -227,8 +228,8 @@ def main(): # Setting npv = 1. nbr_seeds = len(seed_generator.seeds_vox_corner) if len(seed_generator.seeds_vox_corner) == 0: - parser.error('Seed mask "{}" does not have any voxel with value > 0.' - .format(args.in_seed)) + parser.error('Seed mask "{}" does not have any voxel with' + ' value > 0.'.format(args.in_seed)) logging.info("Loading tracking mask.") mask_img = nib.load(args.in_mask) From e49e94abc835072945a33da282e9f8bfa424d5e7 Mon Sep 17 00:00:00 2001 From: VincentBeaud Date: Thu, 28 Nov 2024 09:20:25 -0500 Subject: [PATCH 05/44] remove print and improve doc --- scilpy/tracking/seed.py | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/scilpy/tracking/seed.py b/scilpy/tracking/seed.py index cef0ec6a4..5ef2048a5 100644 --- a/scilpy/tracking/seed.py +++ b/scilpy/tracking/seed.py @@ -371,12 +371,15 @@ class CustomSeedsGenerator(SeedGenerator): Adaptation of the scilpy.tracking.seed.SeedGenerator interface for using already generated, custom seeds. """ - def __init__(self, custom_seeds, voxres, origin, space): + def __init__(self, custom_seeds, voxres, space=Space('vox'), + origin=Origin('center')): """ Parameters ---------- custom_seeds: list Custom seeding coordinates. + voxres: np.ndarray(3,) + The pixel resolution, ex, using img.header.get_zooms()[:3]. """ self.voxres = voxres self.origin = origin @@ -386,16 +389,8 @@ def __init__(self, custom_seeds, voxres, origin, space): def init_generator(self, rng_seed, numbers_to_skip): """ - Initialize a numpy number generator according to user's parameters. - Returns also the shuffled index of all fibertubes. - - The values are not stored in this classed, but are returned to the - user, who should add them as arguments in the methods - self.get_next_pos() - self.get_next_n_pos() - The use of this is that with multiprocessing, each process may have its - own generator, with less risk of using the wrong one when they are - managed by the user. + Does not do anything. Simulates SeedGenerator's implementation for + retro-compatibility. Parameters ---------- @@ -411,8 +406,7 @@ def init_generator(self, rng_seed, numbers_to_skip): random_generator : numpy random generator Initialized numpy number generator. indices : ndarray - Shuffled indices of current seeding map, shuffled with current - generator. 
+ Empty list for interface retro-compatibility """ self.i = numbers_to_skip @@ -423,8 +417,6 @@ def get_next_pos(self, random_generator: np.random.Generator, seed = self.seeds[self.i] self.i += 1 - print(seed) - return seed[0], seed[1], seed[2] def get_next_n_pos(self, random_generator, shuffled_indices, From 00d4100ae1fde0b6272bcc831c4d107f9a282a11 Mon Sep 17 00:00:00 2001 From: VincentBeaud Date: Thu, 28 Nov 2024 10:21:06 -0500 Subject: [PATCH 06/44] bring back tracking to center --- scripts/scil_tracking_local_dev.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/scil_tracking_local_dev.py b/scripts/scil_tracking_local_dev.py index b9a74f701..9aa0b999b 100755 --- a/scripts/scil_tracking_local_dev.py +++ b/scripts/scil_tracking_local_dev.py @@ -196,7 +196,7 @@ def main(): # If save_seeds, space and origin must be vox, center. Choosing those # values. our_space = Space.VOX - our_origin = Origin('corner') + our_origin = Origin('center') # Preparing everything logging.info("Loading seeding mask.") From b85eb6e6816248b6ea058e34290864e780fa84bf Mon Sep 17 00:00:00 2001 From: EmmaRenauld Date: Fri, 29 Nov 2024 08:51:14 -0500 Subject: [PATCH 07/44] Compare populations: already good! --- .../scil_connectivity_compare_populations.py | 31 +++++++++++++------ 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/scripts/scil_connectivity_compare_populations.py b/scripts/scil_connectivity_compare_populations.py index 74a9bcced..5f12b8151 100755 --- a/scripts/scil_connectivity_compare_populations.py +++ b/scripts/scil_connectivity_compare_populations.py @@ -2,21 +2,31 @@ # -*- coding: utf-8 -*- """ -Performs a network-based statistical comparison for populations g1 and g2. The -output is a matrix of the same size as the input connectivity matrices, with -p-values at each edge. -All input matrices must have the same shape (NxN). For paired t-test, both -groups must have the same number of observations. +Performs a statistical comparison between connectivity matrices for populations +g1 and g2, using a t-test. + +The inputs are any connectivity matrix, that can be obtained with +scil_connectivity_compute_matrices.py, used separately on the two populations. +All input matrices must have the same shape (NxN). + +The output is a matrix of the same size as the input connectivity matrices, +with p-values at each connection (edge). For example, if you have streamline count weighted matrices for a MCI and a -control group and you want to investiguate differences in their connectomes: +control group, and you want to investiguate differences in their connectomes: >>> scil_connectivity_compare_populations.py pval.npy --g1 MCI/*_sc.npy --g2 CTL/*_sc.npy +Options: + --filtering_mask will simply multiply the binary mask to all input matrices before performing the statistical comparison. Reduces the number of statistical tests, useful when using --fdr or --bonferroni. +--paired with use a paired t-test. Then both groups must have the same number +of observations (subjects). They must be listed in the right order using --g1 +and --g2. 
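+
+For instance, a hypothetical paired pre/post design (the pre/ and post/
+folder layout here is only illustrative):
+>>> scil_connectivity_compare_populations.py pval.npy --paired --g1 pre/*_sc.npy --g2 post/*_sc.npy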
+ Formerly: scil_compare_connectivity.py """ @@ -53,9 +63,11 @@ def _build_arg_parser(): help='Output matrix (.npy) containing the edges p-value.') p.add_argument('--in_g1', nargs='+', required=True, - help='List of matrices for the first population (.npy).') + help='List of matrices for each subject in the first ' + 'population (.npy).\n') p.add_argument('--in_g2', nargs='+', required=True, - help='List of matrices for the second population (.npy).') + help='List of matrices for each subject in the second ' + 'population (.npy).') p.add_argument('--tail', choices=['left', 'right', 'both'], default='both', help='Enables specification of an alternative hypothesis:\n' 'left: mean of g1 < mean of g2,\n' @@ -94,8 +106,7 @@ def main(): args = parser.parse_args() logging.getLogger().setLevel(logging.getLevelName(args.verbose)) - assert_inputs_exist(parser, args.in_g1+args.in_g2, - args.filtering_mask) + assert_inputs_exist(parser, args.in_g1+args.in_g2, args.filtering_mask) assert_outputs_exist(parser, args, args.out_pval_matrix) if args.filtering_mask: From b15babf4c198582a51519898d6b6bdd2dab09f4e Mon Sep 17 00:00:00 2001 From: EmmaRenauld Date: Fri, 29 Nov 2024 09:04:59 -0500 Subject: [PATCH 08/44] Normalize: already good! --- scripts/scil_connectivity_normalize.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/scripts/scil_connectivity_normalize.py b/scripts/scil_connectivity_normalize.py index 8a3fd8a06..c01c08945 100755 --- a/scripts/scil_connectivity_normalize.py +++ b/scripts/scil_connectivity_normalize.py @@ -4,7 +4,9 @@ """ Normalize a connectivity matrix coming from scil_tractogram_segment_connections_from_labels.py. -3 categories of normalization are available: + +3 categories of normalization are available, with option for each. You may +choose any number of non-mutually exclusive options: -- Edge attributes - length: Multiply each edge by the average bundle length. Compensate for far away connections when using interface seeding. @@ -31,10 +33,10 @@ - sum_to_one: Ensure the sum of all edges weight is one - log_10: Apply a base 10 logarithm to all edges weight -The volume and length matrix should come from the +The volume and length matrices should come from the scil_tractogram_segment_connections_from_labels.py script. -A review of the type of normalization is available in: +A review of the types of normalization is available in: Colon-Perez, Luis M., et al. "Dimensionless, scale-invariant, edge weight metric for the study of complex structural networks." PLOS one 10.7 (2015). @@ -83,13 +85,14 @@ def _build_arg_parser(): help='Volume matrix used for edge-wise division.') vol = edge_p.add_mutually_exclusive_group() - # toDo. 
Same description for the two options vol.add_argument('--parcel_volume', nargs=2, metavar=('ATLAS', 'LABELS_LIST'), - help='Atlas and labels list for edge-wise division.') + help='Atlas and labels list for edge-wise division by \n' + 'the sum of node volum.') vol.add_argument('--parcel_surface', nargs=2, metavar=('ATLAS', 'LABELS_LIST'), - help='Atlas and labels list for edge-wise division.') + help='Atlas and labels list for edge-wise division by \n' + 'the sum of the node surface.') scaling_p = p.add_argument_group('Scaling options') scale = scaling_p.add_mutually_exclusive_group() From 782e678e474237f3fe5bf2885c7767a21c25755c Mon Sep 17 00:00:00 2001 From: EmmaRenauld Date: Fri, 29 Nov 2024 09:27:45 -0500 Subject: [PATCH 09/44] reorder_rois: small cleaning of redundancy --- scripts/scil_connectivity_reorder_rois.py | 55 ++++++++++++----------- 1 file changed, 29 insertions(+), 26 deletions(-) diff --git a/scripts/scil_connectivity_reorder_rois.py b/scripts/scil_connectivity_reorder_rois.py index 9ed78ab5d..9d1d78339 100755 --- a/scripts/scil_connectivity_reorder_rois.py +++ b/scripts/scil_connectivity_reorder_rois.py @@ -2,20 +2,23 @@ # -*- coding: utf-8 -*- """ -Re-order one or many connectivity matrices using a text file format. -The first row are the (x) and the second row the (y), must be space separated. -The resulting matrix does not have to be square (support unequal number of -x and y). +Re-order one or many connectivity matrices' rows and columns. The connectivity +matrices can come, for instance, from scil_connectivity_computes_matrices.py + +To state the new order, use a text file with the following formatting: +The first row are the new x order and the second row the new y order, using +integer values separated by a space. The resulting matrix does not have to be +square (supports unequal number of x and y). The values refer to the coordinates (starting at 0) in the matrix, but if the --labels_list parameter is used, the values will refer to the label which will be converted to the appropriate coordinates. This file must be the same as the one provided to the scil_tractogram_segment_connections_from_labels.py. -To subsequently use scil_visualize_connectivity.py with a lookup table, you +To subsequently use scil_viz_connectivity.py with a lookup table, you must use a label-based reording json and use --labels_list. -You can also use the Optimal Leaf Ordering(OLO) algorithm to transform a +You can also use the Optimal Leaf Ordering (OLO) algorithm to transform a sparse matrix into an ordering that reduces the matrix bandwidth. The output file can then be re-used with --in_ordering. Only one input can be used with this option, we recommand an average streamline count or volume matrix. @@ -61,9 +64,12 @@ def _build_arg_parser(): 'structures along the diagonal.') p.add_argument('--out_suffix', - help='Suffix for the output matrix filename.') + help="Suffix for the output matrices filenames. It will " + "be appended to each input matrix's name.") p.add_argument('--out_dir', - help='Output directory for the re-ordered matrices.') + help='Output directory for the re-ordered matrices.\n' + 'If not set, each output matrix will be saved in ' + 'the same \ndirectory as the input matrix.') p.add_argument('--labels_list', help='List saved by the decomposition script,\n' '--in_ordering must contain labels rather than ' @@ -77,10 +83,15 @@ def _build_arg_parser(): def parse_ordering(in_ordering_file, labels_list=None): """ - toDo. Docstring please. 
+ Read the ordering file, which should contain two lines, with integer + values for each. """ + # Cannot load with numpy in case of non-squared matrix (unequal x/y) with open(in_ordering_file, 'r') as my_file: lines = my_file.readlines() + if len(lines) != 2: + raise ValueError("The ordering file should contain exactly two " + "lines of text.") ordering = [[int(val) for val in lines[0].split()], [int(val) for val in lines[1].split()]] if labels_list: @@ -103,8 +114,12 @@ def main(): assert_inputs_exist(parser, args.in_matrices, [args.labels_list, args.in_ordering]) assert_output_dirs_exist_and_empty(parser, args, [], args.out_dir) + # Verification of output matrices names will be done below. if args.optimal_leaf_ordering is not None: + if args.out_suffix or args.out_dir: + logging.warning("Options --out_suffix and --out_dir are ignored " + "with option --optimal_leaf_ordering.") if len(args.in_matrices) > 1: parser.error('Only one input is supported with RCM.') assert_outputs_exist(parser, args, args.optimal_leaf_ordering) @@ -125,29 +140,17 @@ def main(): basename, ext = os.path.splitext(filename) basename = os.path.basename(basename) - curr_filename = os.path.join(out_dir, - '{}{}.{}'.format(basename, - args.out_suffix, - ext[1:])) + curr_filename = os.path.join( + out_dir, '{}{}.{}'.format(basename, args.out_suffix, ext[1:])) out_filenames.append(curr_filename) assert_outputs_exist(parser, args, out_filenames) - # Cannot load with numpy in case of non-squared matrix (unequal x/y) ordering = parse_ordering(args.in_ordering, args.labels_list) - for filename in args.in_matrices: - out_dir = os.path.dirname(filename) if args.out_dir is None \ - else args.out_dir - basename, ext = os.path.splitext(filename) - basename = os.path.basename(basename) - matrix = load_matrix_in_any_format(filename) + for in_name, out_name in zip(args.in_matrices, out_filenames): + matrix = load_matrix_in_any_format(in_name) reordered_matrix = apply_reordering(matrix, ordering) - curr_filename = os.path.join(out_dir, - '{}{}.{}'.format(basename, - args.out_suffix, - ext[1:])) - - save_matrix_in_any_format(curr_filename, reordered_matrix) + save_matrix_in_any_format(out_name, reordered_matrix) if __name__ == "__main__": From 8a87d04c12a1225efb5b024292f92e8bda0e34e3 Mon Sep 17 00:00:00 2001 From: EmmaRenauld Date: Fri, 29 Nov 2024 11:59:56 -0500 Subject: [PATCH 10/44] Compute_pca: big refactor --- scripts/scil_connectivity_compute_matrices.py | 11 + scripts/scil_connectivity_compute_pca.py | 340 +++++++++--------- 2 files changed, 171 insertions(+), 180 deletions(-) diff --git a/scripts/scil_connectivity_compute_matrices.py b/scripts/scil_connectivity_compute_matrices.py index f28a1730b..bbd66644a 100755 --- a/scripts/scil_connectivity_compute_matrices.py +++ b/scripts/scil_connectivity_compute_matrices.py @@ -33,6 +33,8 @@ - Streamline count. - Length: mean streamline length (mm). + Note that this matrix, as well as the volume-weighted, can be used to + normalize a streamline count matrix in scil_connectivity_normalize. - Volume-weighted: Volume of the bundle. - Similarity: mean density. Uses pre-computed density maps, which can be obtained with @@ -54,6 +56,15 @@ - Mean DPS: Mean values in the data_per_streamline of each streamline in the bundles. +What next? +========== +See our other scripts to help you achieve your goals: + - Normalize a streamline-count matrix based on other matrices using + scil_connectivity_normalize. 
+ - Compute a t-test between two groups of subjects using + scil_connectivity_compare_populations. + - See all our scripts starting with scil_connectivity_ for more ideas! + Formerly: scil_compute_connectivity.py """ diff --git a/scripts/scil_connectivity_compute_pca.py b/scripts/scil_connectivity_compute_pca.py index 8a19d7a5f..6ab12ed8e 100755 --- a/scripts/scil_connectivity_compute_pca.py +++ b/scripts/scil_connectivity_compute_pca.py @@ -2,23 +2,38 @@ # -*- coding: utf-8 -*- """ -Script to compute PCA analysis on diffusion metrics. Output returned is all -significant principal components (e.g. presenting eigenvalues > 1) in a -connectivity matrix format. This script can take into account all edges from -every subject in a population or only non-zero edges across all subjects. +Script to compute PCA analysis on a set of connectivity matrices. The output is +all significant principal components in a connectivity matrix format. +This script can take into account all edges from every subject in a population +or only non-zero edges across all subjects. + +Interpretation of resulting principal components can be done by evaluating the +loadings values for each metrics. A value near 0 means that this metric doesn't +contribute to this specific component whereas high positive or negative values +mean a larger contribution. Components can then be labeled based on which +metric contributes the highest. For example, a principal component showing a +high loading for afd_fixel and near 0 loading for all other metrics can be +interpreted as axonal density (see Gagnon et al. 2022 for this specific example +or ref [3] for an introduction to PCA). The script can take directly as input a connectoflow output folder. Simply use -the --input_connectoflow flag. For other type of folder input, the script -expects a single folder containing all matrices for all subjects. -Example: +the --input_connectoflow flag. Else, the script expects a single folder +containing all matrices for all subjects. Those matrices can be obtained, for +instance, by scil_connectivity_compute_matrices.py. +Example: Default input [in_folder] |--- sub-01_ad.npy |--- sub-01_md.npy |--- sub-02_ad.npy |--- sub-02_md.npy |--- ... +Connectoflow input: + [in_folder] + [subj-01] + [Compute_Connectivity] + |--- ad.npy -The plots, tables and principal components matrices will be outputted in the +The plots, tables and principal components matrices will be saved in the designated folder from the argument. If you want to move back your principal components matrices in your connectoflow output, you can use a similar bash command for all principal components: @@ -27,23 +42,14 @@ cp out_folder/${sub}_PC1.npy connectoflow_output/$sub/Compute_Connectivity/ done -Interpretation of resulting principal components can be done by evaluating the -loadings values for each metrics. A value near 0 means that this metric doesn't -contribute to this specific component whereas high positive or negative values -mean a larger contribution. Components can then be labeled based on which -metric contributes the highest. For example, a principal component showing a -high loading for afd_fixel and near 0 loading for all other metrics can be -interpreted as axonal density (see Gagnon et al. 2022 for this specific example -or ref [3] for an introduction to PCA). - EXAMPLE USAGE: scil_connectivity_compute_pca.py input_folder/ output_folder/ --metrics ad fa md rd [...] --list_ids list_ids.txt """ -# Import required libraries. 
import argparse import logging +import os import matplotlib.pyplot as plt import numpy as np @@ -55,7 +61,8 @@ save_matrix_in_any_format, add_verbose_arg, add_overwrite_arg, - assert_output_dirs_exist_and_empty) + assert_output_dirs_exist_and_empty, + assert_inputs_dirs_exist, assert_inputs_exist) EPILOG = """ @@ -91,7 +98,7 @@ def _build_arg_parser(): 'followed by the .npy extension.') p.add_argument('--list_ids', required=True, metavar='FILE', help='Path to a .txt file containing a list of all ids.') - p.add_argument('--not_only_common', action='store_true', + p.add_argument('--all_edges', action='store_true', help='If true, will include all edges from all subjects ' 'and not only \ncommon edges (Not recommended)') p.add_argument('--input_connectoflow', action='store_true', @@ -104,41 +111,76 @@ def _build_arg_parser(): return p -def generate_pca_input(dictionary): - """ - Function to create PCA input from matrix. - :param dictionary: Dictionary with metrics as keys containing list of matrices sorted by ids. - :return: Numpy array. - """ - for metric in dictionary.keys(): - mat = np.rollaxis(np.array(dictionary[metric]), axis=1, start=3) - matrix_shape = mat.shape[1:3] - n_group = mat.shape[0] - mat = mat.reshape((np.prod(matrix_shape) * n_group, 1)) - dictionary[metric] = mat +def plot_eigenvalues(pca, principaldf, nb_metrics, out_file): + # Plot the eigenvalues. + logging.info('Plotting results...') + eigenvalues = pca.explained_variance_ + pos = list(range(1, nb_metrics + 1)) + plt.clf() + plt.cla() - return np.hstack([dictionary[i] for i in dictionary.keys()]) + fig = plt.figure() + ax = fig.add_subplot(1, 1, 1) + bar_eig = ax.bar(pos, eigenvalues, align='center', + tick_label=principaldf.columns) + ax.set_xlabel('Principal Components', fontsize=10) + ax.set_ylabel('Eigenvalues', fontsize=10) + ax.set_title('Eigenvalues for each principal components.', fontsize=10) + ax.margins(0.1) + autolabel(bar_eig, ax) + plt.savefig(out_file) + return eigenvalues -def restore_zero_values(orig, new): - """ - Function to restore 0 values in a numpy array. - :param orig: Original numpy array containing 0 values to restore. - :param new: Array in which 0 values need to be restored. - :return: Numpy array with data from the new array but zeros from the original array. - """ - mask = np.copy(orig) - mask[mask != 0] = 1 +def plot_explained_variance(pca, principaldf, nb_metrics, out_file): + # Plot the explained variance. + explained_var = pca.explained_variance_ratio_ + pos = list(range(1, nb_metrics + 1)) + plt.clf() + plt.cla() + fig = plt.figure() + ax = fig.add_subplot(1, 1, 1) + bar_var = ax.bar(pos, explained_var, align='center', + tick_label=principaldf.columns) + ax.set_xlabel('Principal Components', fontsize=10) + ax.set_ylabel('Explained variance', fontsize=10) + ax.set_title('Amount of explained variance for all principal components.', + fontsize=10) + ax.margins(0.1) + autolabel(bar_var, ax) + plt.savefig(out_file) - return np.multiply(new, mask) + +def plot_contribution(pca, principaldf, metrics, out_excel, out_image): + # Plot the contribution of each measures to principal component. 
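+    # (pca.components_ has shape (n_components, n_metrics); each row holds
+    # the loadings of one principal component over the input metrics.)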
+ component = pca.components_ + output_component = pd.DataFrame(component, index=principaldf.columns, + columns=metrics) + output_component.to_excel(out_excel, index=True, header=True) + plt.clf() + plt.cla() + fig, axs = plt.subplots(2) + fig.suptitle('Graph of the contribution of each measures to the first ' + 'and second principal component.', fontsize=10) + pos = list(range(1, len(metrics) + 1)) + bar_pc1 = axs[0].bar(pos, component[0], align='center', + tick_label=metrics) + bar_pc2 = axs[1].bar(pos, component[1], align='center', + tick_label=metrics) + axs[0].margins(0.2) + axs[1].margins(0.2) + autolabel(bar_pc1, axs[0]) + autolabel(bar_pc2, axs[1]) + for ax in axs.flat: + ax.set(xlabel='Diffusion measures', ylabel='Loadings') + for ax in axs.flat: + ax.label_outer() + plt.savefig(out_image) def autolabel(rects, axs): """ Attach a text label above each bar displaying its height (or bar value). - :param rects: Graphical object. - :param axs: Axe number. - :return: """ for rect in rects: height = rect.get_height() @@ -150,56 +192,19 @@ def autolabel(rects, axs): '%.3f' % float(height), ha='center', va='bottom') -def extracting_common_cnx(dictionary, ind): - """ - Function to create a binary mask representing common connections across the population - containing non-zero values. - :param dictionary: Dictionary with metrics as keys containing list of matrices sorted by ids. - :param ind: Indice of the key to use to generate the binary mask from the dictionary. - :return: Binary mask. - """ - keys = list(dictionary.keys()) - met = np.copy(dictionary[keys[ind]]) - - # Replacing all non-zero values by 1. - for i in range(0, len(met)): - met[i][met[i] != 0] = 1 - - # Adding all matrices together. - orig = np.copy(met[0]) - mask = [orig] - for i in range(1, len(met)): - orig = np.add(orig, met[i]) - mask.append(orig) - - # Converting resulting values to 0 and 1. - mask_f = mask[(len(met) - 1)] - mask_f[mask_f != len(met)] = 0 - mask_f[mask_f == len(met)] = 1 - - return mask_f - - -def apply_binary_mask(dictionary, mask): - """ - Function to apply a binary mask to all matrices contained in a dictionary. - :param dictionary: Dictionary with metrics as keys containing list of matrices sorted by ids. - :param mask: Binary mask. - :return: Dictionary with the same shape as input. - """ - for a in dictionary.keys(): - for i in range(0, len(dictionary[a])): - dictionary[a][i] = np.multiply(dictionary[a][i], mask) - - return dictionary - - def main(): parser = _build_arg_parser() args = parser.parse_args() logging.getLogger().setLevel(logging.getLevelName(args.verbose)) - assert_output_dirs_exist_and_empty(parser, args, args.out_folder, create_dir=True) + assert_inputs_dirs_exist(parser, args.in_folder) + assert_inputs_exist(parser, args.list_ids) + assert_output_dirs_exist_and_empty(parser, args, args.out_folder, + create_dir=True) + out_eigenvalues = os.path.join(args.out_folder, 'eigenvalues.pdf') + out_variance = os.path.join(args.out_folder, 'explained_variance.pdf') + out_excel = os.path.join(args.out_folder, 'loadings.xlsx') + out_contributions = os.path.join(args.out_folder, 'contribution.pdf') with open(args.list_ids) as f: subjects = f.read().split() @@ -207,122 +212,97 @@ def main(): if args.input_connectoflow: # Loading all matrix. 
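+        # (Expected layout: [in_folder]/<subject>/Compute_Connectivity/
+        # <metric>.npy, as described in the example at the top of this file.)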
logging.info('Loading all matrices from a Connectoflow output...') - dictionary = {m: [load_matrix_in_any_format(f'{args.in_folder}/{a}/Compute_Connectivity/{m}.npy') - for a in subjects] - for m in args.metrics} + files_per_metric = [[os.path.join( + args.in_folder, a, 'Compute_Connectivity', '{}.npy'.format(m)) + for a in subjects] + for m in args.metrics] else: logging.info('Loading all matrices...') - dictionary = {m: [load_matrix_in_any_format(f'{args.in_folder}/{a}_{m}.npy') for a in subjects] - for m in args.metrics} - # Assert that all metrics have the same number of subjects. - nb_sub = [len(dictionary[m]) for m in args.metrics] - assert all(x == len(subjects) for x in nb_sub), "Error, the number of matrices for each metric doesn't match" \ - "the number of subject in the id list." \ - "Please validate input folder." + files_per_metric = [[os.path.join( + args.in_folder, '{}_{}.npy'.format(a, m)) + for a in subjects] + for m in args.metrics] + + assert_inputs_exist(parser, sum(files_per_metric, [])) + matrices_per_metric = [[load_matrix_in_any_format(f) for f in files] + for files in files_per_metric] # Setting individual matrix shape. - mat_shape = dictionary[args.metrics[0]][0].shape + mat_shape = matrices_per_metric[0][0].shape - if args.not_only_common: + if args.all_edges: # Creating input structure using all edges from all subjects. logging.info('Creating PCA input structure with all edges...') - df = generate_pca_input(dictionary) else: - m1 = extracting_common_cnx(dictionary, 0) - m2 = extracting_common_cnx(dictionary, 1) - - if m1.shape != mat_shape: - parser.error("Extracted binary mask doesn't match the shape of individual input matrix.") - - if np.sum(m1) != np.sum(m2): - parser.error("Different binary mask have been generated from 2 different metrics, \n " - "please validate input data.") - else: - logging.info('Data shows {} common connections across the population.'.format(np.sum(m1))) - - dictionary = apply_binary_mask(dictionary, m1) - - # Creating input structure. logging.info('Creating PCA input structure with common edges...') - df = generate_pca_input(dictionary) - # Setting 0 values to nan. - df[df == 0] = 'nan' - - # Standardized the data. + common_edges_mask = np.sum(matrices_per_metric[0]) != 0 + + # Verifying that other metrics have the same common edges + for i, matrices in enumerate(matrices_per_metric[1:]): + tmp_mask = np.sum(matrices) != 0 + if not np.array_equal(common_edges_mask, tmp_mask): + parser.error("Different binary masks (common edge) have been " + "generated from metric {} than other metrics. " + "Please validate your input data." + .format(args.metrics[i])) + + logging.info('Data shows {} common connections across the population.' + .format(np.sum(common_edges_mask))) + + # Apply mask + matrices_per_metric = [[m * common_edges_mask + for m in metric_matrices] + for metric_matrices in matrices_per_metric] + + # Creating input structure. + # For each metric, combining all subjects together, flattening all + # connectivity matrices. + flat_mats = [] + for metric_matrices in matrices_per_metric: + mat = np.rollaxis(np.array(metric_matrices), axis=1, start=3) + mat = mat.reshape((np.prod(mat.shape), 1)) + flat_mats.append(mat) + df = np.hstack(flat_mats) # Shape: nb_metrics x (flattened data) + df[df == 0] = np.nan # Setting 0 values to nan. + + # Standardizing the data. logging.info('Standardizing data...') x = StandardScaler().fit_transform(df) df_pca = x[~np.isnan(x).any(axis=1)] x = np.nan_to_num(x, nan=0.) - # Perform the PCA. 
+ # Performing the PCA. logging.info('Performing PCA...') pca = PCA(n_components=len(args.metrics)) principalcomponents = pca.fit_transform(df_pca) - principaldf = pd.DataFrame(data=principalcomponents, columns=[f'PC{i}' for i in range(1, len(args.metrics) + 1)]) - - # Plot the eigenvalues. - logging.info('Plotting results...') - eigenvalues = pca.explained_variance_ - pos = list(range(1, len(args.metrics)+1)) - plt.clf() - plt.cla() - fig = plt.figure() - ax = fig.add_subplot(1, 1, 1) - bar_eig = ax.bar(pos, eigenvalues, align='center', tick_label=principaldf.columns) - ax.set_xlabel('Principal Components', fontsize=10) - ax.set_ylabel('Eigenvalues', fontsize=10) - ax.set_title('Eigenvalues for each principal components.', fontsize=10) - ax.margins(0.1) - autolabel(bar_eig, ax) - plt.savefig(f'{args.out_folder}/eigenvalues.pdf') - - # Plot the explained variance. - explained_var = pca.explained_variance_ratio_ - plt.clf() - plt.cla() - fig = plt.figure() - ax = fig.add_subplot(1, 1, 1) - bar_var = ax.bar(pos, explained_var, align='center', tick_label=principaldf.columns) - ax.set_xlabel('Principal Components', fontsize=10) - ax.set_ylabel('Explained variance', fontsize=10) - ax.set_title('Amount of explained variance for all principal components.', fontsize=10) - ax.margins(0.1) - autolabel(bar_var, ax) - plt.savefig(f'{args.out_folder}/explained_variance.pdf') - - # Plot the contribution of each measures to principal component. - component = pca.components_ - output_component = pd.DataFrame(component, index=principaldf.columns, columns=args.metrics) - output_component.to_excel(f'{args.out_folder}/loadings.xlsx', index=True, header=True) - plt.clf() - plt.cla() - fig, axs = plt.subplots(2) - fig.suptitle('Graph of the contribution of each measures to the first and second principal component.', fontsize=10) - bar_pc1 = axs[0].bar(pos, component[0], align='center', tick_label=args.metrics) - bar_pc2 = axs[1].bar(pos, component[1], align='center', tick_label=args.metrics) - axs[0].margins(0.2) - axs[1].margins(0.2) - autolabel(bar_pc1, axs[0]) - autolabel(bar_pc2, axs[1]) - for ax in axs.flat: - ax.set(xlabel='Diffusion measures', ylabel='Loadings') - for ax in axs.flat: - ax.label_outer() - plt.savefig(f'{args.out_folder}/contribution.pdf') + principaldf = pd.DataFrame( + data=principalcomponents, + columns=[f'PC{i}' for i in range(1, len(args.metrics) + 1)]) + + # Plotting + eigenvalues = plot_eigenvalues(pca, principaldf, + nb_metrics=len(args.metrics), + out_file=out_eigenvalues) + plot_explained_variance(pca, principaldf, nb_metrics=len(args.metrics), + out_file=out_variance) + plot_contribution(pca, principaldf, args.metrics, out_excel, + out_contributions) # Extract the derived newly computed measures from the PCA analysis. 
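+    # (pca.transform projects the standardized edges onto the components;
+    # zeroed edges are restored, then one matrix per principal component and
+    # per subject is rebuilt below.)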
logging.info('Saving matrices for PC with eigenvalues > 1...') out = pca.transform(x) - out = restore_zero_values(x, out) - out = out.swapaxes(0, 1).reshape(len(args.metrics), len(subjects), mat_shape[0], mat_shape[1]) + out = out * (x != 0) + out = out.swapaxes(0, 1).reshape(len(args.metrics), len(subjects), + mat_shape[0], mat_shape[1]) # Save matrix for significant components nb_pc = eigenvalues[eigenvalues >= 1] for i in range(0, len(nb_pc)): for s in range(0, len(subjects)): - save_matrix_in_any_format(f'{args.out_folder}/{subjects[s]}_PC{i+1}.npy', - out[i, s, :, :]) + filename = os.path.join(args.out_folder, + f'{subjects[s]}_PC{i+1}.npy') + save_matrix_in_any_format(filename, out[i, s, :, :]) if __name__ == "__main__": From cf57e30f495d240bfbf682332824a555ef3db9b7 Mon Sep 17 00:00:00 2001 From: EmmaRenauld Date: Fri, 29 Nov 2024 12:03:32 -0500 Subject: [PATCH 11/44] typo --- scripts/scil_connectivity_compare_populations.py | 2 +- scripts/scil_connectivity_normalize.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/scil_connectivity_compare_populations.py b/scripts/scil_connectivity_compare_populations.py index 5f12b8151..1a5b561ec 100755 --- a/scripts/scil_connectivity_compare_populations.py +++ b/scripts/scil_connectivity_compare_populations.py @@ -23,7 +23,7 @@ matrices before performing the statistical comparison. Reduces the number of statistical tests, useful when using --fdr or --bonferroni. ---paired with use a paired t-test. Then both groups must have the same number +--paired will use a paired t-test. Then both groups must have the same number of observations (subjects). They must be listed in the right order using --g1 and --g2. diff --git a/scripts/scil_connectivity_normalize.py b/scripts/scil_connectivity_normalize.py index c01c08945..c18ba9654 100755 --- a/scripts/scil_connectivity_normalize.py +++ b/scripts/scil_connectivity_normalize.py @@ -5,7 +5,7 @@ Normalize a connectivity matrix coming from scil_tractogram_segment_connections_from_labels.py. -3 categories of normalization are available, with option for each. You may +3 categories of normalization are available, with options for each. You may choose any number of non-mutually exclusive options: -- Edge attributes - length: Multiply each edge by the average bundle length. From ac6dd88cda1cc96b094257f61057a3b266e888c0 Mon Sep 17 00:00:00 2001 From: VincentBeaud Date: Thu, 5 Dec 2024 16:25:21 -0500 Subject: [PATCH 12/44] custom seeds tests and pep8 --- scilpy/tracking/seed.py | 2 +- scilpy/tracking/utils.py | 5 +++++ scripts/scil_tracking_local.py | 4 ---- scripts/scil_tracking_local_dev.py | 8 ++------ scripts/tests/test_tracking_local.py | 18 ++++++++++++++++++ scripts/tests/test_tracking_local_dev.py | 23 +++++++++++++++++++++++ 6 files changed, 49 insertions(+), 11 deletions(-) diff --git a/scilpy/tracking/seed.py b/scilpy/tracking/seed.py index 5ef2048a5..8f869548c 100644 --- a/scilpy/tracking/seed.py +++ b/scilpy/tracking/seed.py @@ -366,7 +366,7 @@ def get_next_n_pos(self, random_generator, shuffled_indices, return seeds -class CustomSeedsGenerator(SeedGenerator): +class CustomSeedsDispenser(SeedGenerator): """ Adaptation of the scilpy.tracking.seed.SeedGenerator interface for using already generated, custom seeds. 
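+
+    Example (sketch; assumes an (N, 3) array of seed coordinates in voxel
+    space, center origin, as given to --in_custom_seeds):
+    >>> seeds = np.squeeze(load_matrix_in_any_format('custom_seeds.npy'))
+    >>> dispenser = CustomSeedsDispenser(seeds, img.header.get_zooms()[:3],
+    ...                                  space=Space.VOX,
+    ...                                  origin=Origin('center'))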
diff --git a/scilpy/tracking/utils.py b/scilpy/tracking/utils.py index c95626097..15ffd2e5a 100644 --- a/scilpy/tracking/utils.py +++ b/scilpy/tracking/utils.py @@ -126,6 +126,11 @@ def add_seeding_options(p): help='Number of seeds per voxel.') seed_sub_exclusive.add_argument('--nt', type=int, help='Total number of seeds to use.') + seed_sub_exclusive.add_argument( + '--in_custom_seeds', type=str, + help='Path to a file containing a list of custom seeding \n' + 'coordinates (.txt, .mat or .npy). They should be in' + 'voxel space.') def add_out_options(p): diff --git a/scripts/scil_tracking_local.py b/scripts/scil_tracking_local.py index 43d567ab7..8f7f7105c 100755 --- a/scripts/scil_tracking_local.py +++ b/scripts/scil_tracking_local.py @@ -100,10 +100,6 @@ def _build_arg_parser(): track_g = add_tracking_options(p) add_seeding_options(p) - p.add_argument('--in_custom_seeds', type=str, - help='Path to a file containing a list of custom seeding \n' - 'coordinates. (.txt, .mat or .npy)') - # Other options, only available in this script: track_g.add_argument('--sh_to_pmf', action='store_true', help='If set, map sherical harmonics to spherical ' diff --git a/scripts/scil_tracking_local_dev.py b/scripts/scil_tracking_local_dev.py index a47b29fd5..889655558 100755 --- a/scripts/scil_tracking_local_dev.py +++ b/scripts/scil_tracking_local_dev.py @@ -65,7 +65,7 @@ load_matrix_in_any_format) from scilpy.image.volume_space_management import DataVolume from scilpy.tracking.propagator import ODFPropagator -from scilpy.tracking.seed import SeedGenerator, CustomSeedsGenerator +from scilpy.tracking.seed import SeedGenerator, CustomSeedsDispenser from scilpy.tracking.tracker import Tracker from scilpy.tracking.utils import (add_mandatory_options_tracking, add_out_options, add_seeding_options, @@ -85,10 +85,6 @@ def _build_arg_parser(): track_g = add_tracking_options(p) add_seeding_options(p) - p.add_argument('--in_custom_seeds', type=str, - help='Path to a file containing a list of custom seeding \n' - 'coordinates. (.txt, .mat or .npy)') - # Options only for here. track_g.add_argument('--algo', default='prob', choices=['det', 'prob'], help='Algorithm to use. 
[%(default)s]') @@ -210,7 +206,7 @@ def main(): seed_res = seed_img.header.get_zooms()[:3] if args.in_custom_seeds: seeds = np.squeeze(load_matrix_in_any_format(args.in_custom_seeds)) - seed_generator = CustomSeedsGenerator(seeds, seed_res, space=our_space, + seed_generator = CustomSeedsDispenser(seeds, seed_res, space=our_space, origin=our_origin) nbr_seeds = len(seeds) else: diff --git a/scripts/tests/test_tracking_local.py b/scripts/tests/test_tracking_local.py index 91c659c47..757dbd317 100644 --- a/scripts/tests/test_tracking_local.py +++ b/scripts/tests/test_tracking_local.py @@ -3,6 +3,7 @@ import os import tempfile +import numpy as np from scilpy import SCILPY_HOME from scilpy.io.fetcher import fetch_data, get_testing_files_dict @@ -183,3 +184,20 @@ def test_execution_tracking_ptt(script_runner, monkeypatch): '--sh_to_pmf', '-v', '--probe_length', '2', '--probe_quality', '5', '--algo', 'ptt') assert ret.success + + +def test_execution_tracking_fodf_custom_seeds(script_runner, monkeypatch): + monkeypatch.chdir(os.path.expanduser(tmp_dir.name)) + in_fodf = os.path.join(SCILPY_HOME, 'tracking', 'fodf.nii.gz') + in_mask = os.path.join(SCILPY_HOME, 'tracking', 'seeding_mask.nii.gz') + in_custom_seeds = 'custom_seeds.npy' + + custom_seeds = [[1., 1., 1.], [2., 2., 2.], [3., 3., 3.]] + np.save(in_custom_seeds, custom_seeds) + + ret = script_runner.run('scil_tracking_local.py', in_fodf, + in_mask, in_mask, 'local_prob.trk', + '--in_custom_seeds', in_custom_seeds, + '--compress', '0.1', '--sh_basis', 'descoteaux07', + '--min_length', '20', '--max_length', '200') + assert ret.success diff --git a/scripts/tests/test_tracking_local_dev.py b/scripts/tests/test_tracking_local_dev.py index 7d02f4d13..f8001d66f 100644 --- a/scripts/tests/test_tracking_local_dev.py +++ b/scripts/tests/test_tracking_local_dev.py @@ -3,6 +3,7 @@ import os import tempfile +import numpy as np from scilpy import SCILPY_HOME from scilpy.io.fetcher import fetch_data, get_testing_files_dict @@ -32,3 +33,25 @@ def test_execution_tracking_fodf(script_runner, monkeypatch): '--sub_sphere', '2', '--rk_order', '4') assert ret.success + + +def test_execution_tracking_fodf_custom_seeds(script_runner, monkeypatch): + monkeypatch.chdir(os.path.expanduser(tmp_dir.name)) + in_fodf = os.path.join(SCILPY_HOME, 'tracking', + 'fodf.nii.gz') + in_mask = os.path.join(SCILPY_HOME, 'tracking', + 'seeding_mask.nii.gz') + in_custom_seeds = 'custom_seeds.npy' + + custom_seeds = [[1., 1., 1.], [2., 2., 2.], [3., 3., 3.]] + np.save(in_custom_seeds, custom_seeds) + + ret = script_runner.run('scil_tracking_local_dev.py', in_fodf, + in_mask, in_mask, 'local_prob.trk', + '--in_custom_seeds', in_custom_seeds, + '--compress', '0.1', '--sh_basis', 'descoteaux07', + '--min_length', '20', '--max_length', '200', + '--save_seeds', '--rng_seed', '0', + '--sub_sphere', '2', + '--rk_order', '4') + assert ret.success From b56f5db0644897c20cf7aeaef877659291377442 Mon Sep 17 00:00:00 2001 From: VincentBeaud Date: Wed, 11 Dec 2024 11:26:14 -0500 Subject: [PATCH 13/44] Pass tests --- scripts/tests/test_tracking_local.py | 2 +- scripts/tests/test_tracking_local_dev.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/tests/test_tracking_local.py b/scripts/tests/test_tracking_local.py index 757dbd317..e4abdf417 100644 --- a/scripts/tests/test_tracking_local.py +++ b/scripts/tests/test_tracking_local.py @@ -196,7 +196,7 @@ def test_execution_tracking_fodf_custom_seeds(script_runner, monkeypatch): np.save(in_custom_seeds, custom_seeds) ret = 
script_runner.run('scil_tracking_local.py', in_fodf, - in_mask, in_mask, 'local_prob.trk', + in_mask, in_mask, 'local_prob4.trk', '--in_custom_seeds', in_custom_seeds, '--compress', '0.1', '--sh_basis', 'descoteaux07', '--min_length', '20', '--max_length', '200') diff --git a/scripts/tests/test_tracking_local_dev.py b/scripts/tests/test_tracking_local_dev.py index f8001d66f..fc96605ac 100644 --- a/scripts/tests/test_tracking_local_dev.py +++ b/scripts/tests/test_tracking_local_dev.py @@ -47,7 +47,7 @@ def test_execution_tracking_fodf_custom_seeds(script_runner, monkeypatch): np.save(in_custom_seeds, custom_seeds) ret = script_runner.run('scil_tracking_local_dev.py', in_fodf, - in_mask, in_mask, 'local_prob.trk', + in_mask, in_mask, 'local_prob2.trk', '--in_custom_seeds', in_custom_seeds, '--compress', '0.1', '--sh_basis', 'descoteaux07', '--min_length', '20', '--max_length', '200', From 48678a7f57da4fca0df3f8d13952be5ae2ff9ba0 Mon Sep 17 00:00:00 2001 From: Jeremi Levesque Date: Thu, 12 Dec 2024 09:11:40 -0500 Subject: [PATCH 14/44] fix: return rejected with empty sft --- scilpy/tractograms/streamline_operations.py | 8 ++------ .../tractograms/tests/test_streamline_operations.py | 12 ++++++++++++ 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/scilpy/tractograms/streamline_operations.py b/scilpy/tractograms/streamline_operations.py index 5249b80a8..2e392ee02 100644 --- a/scilpy/tractograms/streamline_operations.py +++ b/scilpy/tractograms/streamline_operations.py @@ -408,19 +408,15 @@ def filter_streamlines_by_length(sft, min_length=0., max_length=np.inf, valid_length_ids = np.logical_and(lengths >= min_length, lengths <= max_length) filtered_sft = sft[valid_length_ids] - - if return_rejected: - rejected_sft = sft[~valid_length_ids] else: - valid_length_ids = [] + valid_length_ids = np.array([], dtype=bool) filtered_sft = sft # Return to original space - sft.to_space(orig_space) filtered_sft.to_space(orig_space) if return_rejected: - rejected_sft.to_space(orig_space) + rejected_sft = sft[~valid_length_ids] return filtered_sft, valid_length_ids, rejected_sft else: return filtered_sft, valid_length_ids diff --git a/scilpy/tractograms/tests/test_streamline_operations.py b/scilpy/tractograms/tests/test_streamline_operations.py index ccddcc972..da22eb514 100644 --- a/scilpy/tractograms/tests/test_streamline_operations.py +++ b/scilpy/tractograms/tests/test_streamline_operations.py @@ -7,6 +7,7 @@ import pytest from dipy.io.streamline import load_tractogram from dipy.tracking.streamlinespeed import length +from dipy.io.stateful_tractogram import StatefulTractogram from scilpy import SCILPY_HOME from scilpy.io.fetcher import fetch_data, get_testing_files_dict @@ -174,6 +175,17 @@ def test_filter_streamlines_by_length(): # Test that streamlines shorter than 100 and longer than 120 were removed. assert np.all(lengths >= min_length) and np.all(lengths <= max_length) + # === 4. 
Return rejected streamlines with empty sft === + empty_sft = short_sft[[]] # Empty sft from short_sft (chosen arbitrarily) + filtered_sft, _, rejected = filter_streamlines_by_length(empty_sft, + min_length=min_length, + max_length=max_length, + return_rejected=True) + assert isinstance(filtered_sft, StatefulTractogram) + assert isinstance(rejected, StatefulTractogram) + assert len(filtered_sft) == 0 + assert len(rejected) == 0 + def test_filter_streamlines_by_total_length_per_dim(): long_sft = load_tractogram(in_long_sft, in_ref) From 296106897e3f2dd75d034fd11277f8f3cb71bf2e Mon Sep 17 00:00:00 2001 From: Jeremi Levesque Date: Thu, 12 Dec 2024 09:18:10 -0500 Subject: [PATCH 15/44] fix: pep8 --- scilpy/tractograms/tests/test_streamline_operations.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/scilpy/tractograms/tests/test_streamline_operations.py b/scilpy/tractograms/tests/test_streamline_operations.py index da22eb514..d381da976 100644 --- a/scilpy/tractograms/tests/test_streamline_operations.py +++ b/scilpy/tractograms/tests/test_streamline_operations.py @@ -176,11 +176,11 @@ def test_filter_streamlines_by_length(): assert np.all(lengths >= min_length) and np.all(lengths <= max_length) # === 4. Return rejected streamlines with empty sft === - empty_sft = short_sft[[]] # Empty sft from short_sft (chosen arbitrarily) - filtered_sft, _, rejected = filter_streamlines_by_length(empty_sft, - min_length=min_length, - max_length=max_length, - return_rejected=True) + empty_sft = short_sft[[]] # Empty sft from short_sft (chosen arbitrarily) + filtered_sft, _, rejected = \ + filter_streamlines_by_length(empty_sft, min_length=min_length, + max_length=max_length, + return_rejected=True) assert isinstance(filtered_sft, StatefulTractogram) assert isinstance(rejected, StatefulTractogram) assert len(filtered_sft) == 0 From 9ca6d8ae3d1fd7f24e2f9eef226d218ceb3dd7e0 Mon Sep 17 00:00:00 2001 From: CHrlS98 Date: Thu, 12 Dec 2024 09:28:28 -0500 Subject: [PATCH 16/44] BF for asymmetric peaks --- scilpy/viz/backends/fury.py | 4 ++-- scripts/scil_viz_fodf.py | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/scilpy/viz/backends/fury.py b/scilpy/viz/backends/fury.py index 3940dacfb..30c5f0be4 100644 --- a/scilpy/viz/backends/fury.py +++ b/scilpy/viz/backends/fury.py @@ -148,7 +148,7 @@ def set_viewport(scene, orientation, slice_index, volume_shape, aspect_ratio): Ratio between viewport's width and height. """ - scene.projection('parallel') + scene.projection(proj_type='parallel') camera = initialize_camera( orientation, slice_index, volume_shape, aspect_ratio) scene.set_camera(position=camera[CamParams.VIEW_POS], @@ -226,7 +226,7 @@ def create_interactive_window(scene, window_size, interactor, Object from Fury containing the 3D scene interactor. """ - showm = window.ShowManager(scene, title=title, + showm = window.ShowManager(scene=scene, title=title, size=window_size, reset_camera=False, interactor_style=interactor) diff --git a/scripts/scil_viz_fodf.py b/scripts/scil_viz_fodf.py index dfc2f2b73..79dee09ed 100755 --- a/scripts/scil_viz_fodf.py +++ b/scripts/scil_viz_fodf.py @@ -155,6 +155,10 @@ def _build_arg_parser(): peaks.add_argument('--peaks_width', default=1.0, type=float, help='Width of peaks segments. [%(default)s]') + + peaks.add_argument('--peaks_opacity', type=float, default=1.0, + help='Peaks opacity. 
1 is opaque, 0 is transparent ' + '[%(default)s].') peaks_scale = p.add_argument_group('Peaks scaling arguments', 'Choose ' 'between peaks values and arbitrary ' @@ -329,6 +333,7 @@ def main(): mask, args.peaks_color, args.peaks_width, + args.peaks_opacity, not full_basis) actors.append(peaks_actor) From 5b124f43affba8d50495c5e8cfe7560186c0596f Mon Sep 17 00:00:00 2001 From: CHrlS98 Date: Thu, 12 Dec 2024 09:31:46 -0500 Subject: [PATCH 17/44] PEP8 --- scripts/scil_viz_fodf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/scil_viz_fodf.py b/scripts/scil_viz_fodf.py index 79dee09ed..1a34c55f4 100755 --- a/scripts/scil_viz_fodf.py +++ b/scripts/scil_viz_fodf.py @@ -155,7 +155,7 @@ def _build_arg_parser(): peaks.add_argument('--peaks_width', default=1.0, type=float, help='Width of peaks segments. [%(default)s]') - + peaks.add_argument('--peaks_opacity', type=float, default=1.0, help='Peaks opacity. 1 is opaque, 0 is transparent ' '[%(default)s].') From 0923930a970bc476e1a3cb0a3b0626f78094c960 Mon Sep 17 00:00:00 2001 From: CHrlS98 Date: Thu, 12 Dec 2024 10:15:44 -0500 Subject: [PATCH 18/44] [ENH] keyword mandatory for kw arguments --- scilpy/viz/backends/fury.py | 4 ++-- scilpy/viz/screenshot.py | 2 +- scilpy/viz/slice.py | 4 ++-- scripts/scil_viz_fodf.py | 12 ++++++------ 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/scilpy/viz/backends/fury.py b/scilpy/viz/backends/fury.py index 30c5f0be4..06db6d6de 100644 --- a/scilpy/viz/backends/fury.py +++ b/scilpy/viz/backends/fury.py @@ -162,7 +162,7 @@ def set_viewport(scene, orientation, slice_index, volume_shape, aspect_ratio): def create_scene(actors, orientation, slice_index, volume_shape, aspect_ratio, - bg_color=(0, 0, 0)): + *, bg_color=(0, 0, 0)): """ Create a 3D scene containing actors fitting inside a grid. The camera is placed based on the orientation supplied by the user. The projection mode @@ -201,7 +201,7 @@ def create_scene(actors, orientation, slice_index, volume_shape, aspect_ratio, return scene -def create_interactive_window(scene, window_size, interactor, +def create_interactive_window(scene, window_size, interactor, *, title="Viewer", open_window=True): """ Create a 3D window with the content of scene, equiped with an interactor. 
diff --git a/scilpy/viz/screenshot.py b/scilpy/viz/screenshot.py index 50528db5c..346ca8b3b 100644 --- a/scilpy/viz/screenshot.py +++ b/scilpy/viz/screenshot.py @@ -127,7 +127,7 @@ def screenshot_peaks(img, orientation, slice_ids, size, mask_img=None): img.shape, size) -def compose_image(img_scene, img_size, slice_number, corner_position=(0, 0), +def compose_image(img_scene, img_size, slice_number, *, corner_position=(0, 0), transparency_scene=None, image_alpha=1.0, labelmap_scene=None, labelmap_overlay_alpha=0.7, overlays_scene=None, overlays_alpha=0.7, diff --git a/scilpy/viz/slice.py b/scilpy/viz/slice.py index ff73cc177..0df655c3e 100644 --- a/scilpy/viz/slice.py +++ b/scilpy/viz/slice.py @@ -14,7 +14,7 @@ from scilpy.viz.utils import affine_from_offset -def create_texture_slicer(texture, orientation, slice_index, mask=None, +def create_texture_slicer(texture, orientation, slice_index, *, mask=None, value_range=None, opacity=1.0, offset=0.5, lut=None, interpolation='nearest'): """ @@ -126,7 +126,7 @@ def create_contours_slicer(data, contour_values, orientation, slice_index, return contours_slicer -def create_peaks_slicer(data, orientation, slice_index, peak_values=None, +def create_peaks_slicer(data, orientation, slice_index, *, peak_values=None, mask=None, color=None, peaks_width=1.0, opacity=1.0, symmetric=False): """ diff --git a/scripts/scil_viz_fodf.py b/scripts/scil_viz_fodf.py index 1a34c55f4..e1223d265 100755 --- a/scripts/scil_viz_fodf.py +++ b/scripts/scil_viz_fodf.py @@ -329,12 +329,12 @@ def main(): peaks_actor = create_peaks_slicer(data['peaks'], args.axis_name, args.slice_index, - peaks_values, - mask, - args.peaks_color, - args.peaks_width, - args.peaks_opacity, - not full_basis) + peak_values=peaks_values, + mask=mask, + color=args.peaks_color, + peaks_width=args.peaks_width, + opacity=args.peaks_opacity, + symmetric=not full_basis) actors.append(peaks_actor) From 9a245c67b96e654664fec9c730a119192317d4d8 Mon Sep 17 00:00:00 2001 From: frheault Date: Thu, 12 Dec 2024 10:25:51 -0500 Subject: [PATCH 19/44] Switch to map coordinates --- scilpy/connectivity/connectivity.py | 38 ++++++++++++++--------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/scilpy/connectivity/connectivity.py b/scilpy/connectivity/connectivity.py index dd6b75901..15f259345 100644 --- a/scilpy/connectivity/connectivity.py +++ b/scilpy/connectivity/connectivity.py @@ -10,14 +10,18 @@ import h5py import nibabel as nib import numpy as np +from scipy.ndimage import map_coordinates from scilpy.image.labels import get_data_as_labels from scilpy.io.hdf5 import reconstruct_streamlines_from_hdf5 from scilpy.tractanalysis.reproducibility_measures import \ compute_bundle_adjacency_voxel from scilpy.tractanalysis.streamlines_metrics import compute_tract_counts_map +from scilpy.tractograms.streamline_operations import \ + resample_streamlines_num_points from scilpy.utils.metrics_tools import compute_lesion_stats + d = threading.local() @@ -31,7 +35,7 @@ def compute_triu_connectivity_from_labels(tractogram, data_labels, ---------- tractogram: StatefulTractogram, or list[np.ndarray] Streamlines. A StatefulTractogram input is recommanded. - When using directly with a list of streamlines, streamlinee must be in + When using directly with a list of streamlines, streamlines must be in vox space, corner origin. data_labels: np.ndarray The loaded nifti image. 
@@ -58,9 +62,11 @@
         # Vox space, corner origin
         # = we can get the nearest neighbor easily.
         # Coord 0 = voxel 0. Coord 0.9 = voxel 0. Coord 1 = voxel 1.
-        tractogram.to_vox()
-        tractogram.to_corner()
-        streamlines = tractogram.streamlines
+        sfs_2_pts = resample_streamlines_num_points(tractogram, 2)
+        sfs_2_pts.to_vox()
+        sfs_2_pts.to_center()
+        streamlines = sfs_2_pts.streamlines
+
     else:
         streamlines = tractogram
 
@@ -71,23 +77,17 @@
                      .format(nb_labels))
 
     matrix = np.zeros((nb_labels, nb_labels), dtype=int)
-    start_labels = []
-    end_labels = []
-
-    for s in streamlines:
-        start = ordered_labels.index(
-            data_labels[tuple(np.floor(s[0, :]).astype(int))])
-        end = ordered_labels.index(
-            data_labels[tuple(np.floor(s[-1, :]).astype(int))])
-        start_labels.append(start)
-        end_labels.append(end)
+    labels = map_coordinates(data_labels, streamlines._data.T,
+                             order=0, mode='nearest')
+    start_labels = labels[0::2]
+    end_labels = labels[1::2]
 
-        matrix[start, end] += 1
-        if start != end:
-            matrix[end, start] += 1
+    # sort each pair of labels for start to be smaller than end
+    start_labels, end_labels = zip(*[sorted(pair) for pair in
+                                     zip(start_labels, end_labels)])
 
-    matrix = np.triu(matrix)
+    np.add.at(matrix, (start_labels, end_labels), 1)
     assert matrix.sum() == len(streamlines)
 
     # Rejecting background
@@ -249,7 +249,7 @@ def compute_connectivity_matrices_from_hdf5(
 
     if compute_volume:
         measures_to_return['volume_mm3'] = np.count_nonzero(density) * \
-                                           np.prod(voxel_sizes)
+            np.prod(voxel_sizes)
 
     if compute_streamline_count:
         measures_to_return['streamline_count'] = len(streamlines)
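
The lookup-and-count pattern adopted above can be sanity-checked in isolation.
A self-contained sketch on toy data (the label volume and endpoint coordinates
below are invented for the example):

    import numpy as np
    from scipy.ndimage import map_coordinates

    data_labels = np.zeros((4, 4, 4), dtype=int)
    data_labels[2:, ...] = 1                       # two regions: 0 and 1

    # Endpoints of 3 streamlines, interleaved start/end, in vox space,
    # center origin (float coordinates, as after resampling to 2 points).
    points = np.array([[0.2, 1.0, 1.0], [3.0, 1.0, 1.0],    # region 0 -> 1
                       [3.0, 2.0, 2.0], [0.0, 2.0, 2.0],    # region 1 -> 0
                       [1.0, 1.0, 1.0], [1.2, 0.0, 0.0]])   # region 0 -> 0

    labels = map_coordinates(data_labels, points.T, order=0)
    start_labels, end_labels = labels[0::2], labels[1::2]

    # Sort each (start, end) pair so every count lands in the upper triangle.
    start_labels, end_labels = zip(*[sorted(p) for p in
                                     zip(start_labels, end_labels)])
    matrix = np.zeros((2, 2), dtype=int)
    np.add.at(matrix, (start_labels, end_labels), 1)
    print(matrix)  # [[1 2], [0 0]]: one 0-0 connection, two 0-1 connections

From 3918ffb2c2def5bcbcccaf69a4d18f4de1964597 Mon Sep 17 00:00:00 2001
From: frheault
Date: Thu, 12 Dec 2024 10:29:34 -0500
Subject: [PATCH 20/44] Modify doc for to_center()

---
 scilpy/connectivity/connectivity.py | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/scilpy/connectivity/connectivity.py b/scilpy/connectivity/connectivity.py
index 15f259345..11ed51a39 100644
--- a/scilpy/connectivity/connectivity.py
+++ b/scilpy/connectivity/connectivity.py
@@ -36,7 +36,7 @@ def compute_triu_connectivity_from_labels(tractogram, data_labels,
     tractogram: StatefulTractogram, or list[np.ndarray]
         Streamlines. A StatefulTractogram input is recommanded.
         When using directly with a list of streamlines, streamlines must be in
-        vox space, corner origin.
+        vox space, center origin.
     data_labels: np.ndarray
         The loaded nifti image.
     keep_background: Bool
@@ -59,9 +59,7 @@
     """
     if isinstance(tractogram, StatefulTractogram):
-        # Vox space, corner origin
-        # = we can get the nearest neighbor easily.
-        # Coord 0 = voxel 0. Coord 0.9 = voxel 0. Coord 1 = voxel 1.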
+ # vox space, center origin: compatible with map_coordinates sfs_2_pts = resample_streamlines_num_points(tractogram, 2) sfs_2_pts.to_vox() sfs_2_pts.to_center() @@ -78,8 +76,7 @@ def compute_triu_connectivity_from_labels(tractogram, data_labels, matrix = np.zeros((nb_labels, nb_labels), dtype=int) - labels = map_coordinates(data_labels, streamlines._data.T, - order=0, mode='nearest') + labels = map_coordinates(data_labels, streamlines._data.T, order=0) start_labels = labels[0::2] end_labels = labels[1::2] From 1c31cc8f679f7b3ef8f1276a25fa39d20a4cc3b7 Mon Sep 17 00:00:00 2001 From: Jeremi Levesque Date: Thu, 12 Dec 2024 10:36:18 -0500 Subject: [PATCH 21/44] fix: restore missing line --- scilpy/tractograms/streamline_operations.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scilpy/tractograms/streamline_operations.py b/scilpy/tractograms/streamline_operations.py index 2e392ee02..6e244af92 100644 --- a/scilpy/tractograms/streamline_operations.py +++ b/scilpy/tractograms/streamline_operations.py @@ -413,6 +413,7 @@ def filter_streamlines_by_length(sft, min_length=0., max_length=np.inf, filtered_sft = sft # Return to original space + sft.to_space(orig_space) filtered_sft.to_space(orig_space) if return_rejected: From 902a7f83edb10d6cbfba681501fe7fc372975cde Mon Sep 17 00:00:00 2001 From: Jeremi Levesque Date: Thu, 12 Dec 2024 10:52:35 -0500 Subject: [PATCH 22/44] fix: use save_rejected argument --- scripts/scil_tractogram_filter_by_length.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/scripts/scil_tractogram_filter_by_length.py b/scripts/scil_tractogram_filter_by_length.py index 1aed05283..f82f3d6c4 100755 --- a/scripts/scil_tractogram_filter_by_length.py +++ b/scripts/scil_tractogram_filter_by_length.py @@ -48,8 +48,8 @@ def _build_arg_parser(): help='Do not write file if there is no streamline.') p.add_argument('--display_counts', action='store_true', help='Print streamline count before and after filtering') - p.add_argument('--save_rejected', action='store_true', - help='Save rejected streamlines to output tractogram.') + p.add_argument('--out_rejected', + help='If specified, save rejected streamlines to this file.') add_json_args(p) add_reference_arg(p) add_verbose_arg(p) @@ -75,8 +75,17 @@ def main(): sft = load_tractogram_with_reference(parser, args, args.in_tractogram) # Processing - new_sft, _, outliers_sft = filter_streamlines_by_length( - sft, args.minL, args.maxL, return_rejected=True) + return_rejected = args.out_rejected is not None + + # Returns (new_sft, _, [rejected_sft]) in filtered_result + filtered_result = filter_streamlines_by_length( + sft, args.minL, args.maxL, return_rejected=return_rejected) + + new_sft = filtered_result[0] + + if return_rejected: + rejected_sft = filtered_result[2] + save_tractogram(rejected_sft, args.out_rejected, args.no_empty) if args.display_counts: sc_bf = len(sft.streamlines) From e11c19f866dc38f860102542e5f6b5924ced37dc Mon Sep 17 00:00:00 2001 From: Jeremi Levesque Date: Thu, 12 Dec 2024 10:53:49 -0500 Subject: [PATCH 23/44] fix: pep8 --- scripts/scil_tractogram_filter_by_length.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/scripts/scil_tractogram_filter_by_length.py b/scripts/scil_tractogram_filter_by_length.py index f82f3d6c4..9b3ba5871 100755 --- a/scripts/scil_tractogram_filter_by_length.py +++ b/scripts/scil_tractogram_filter_by_length.py @@ -49,7 +49,8 @@ def _build_arg_parser(): p.add_argument('--display_counts', action='store_true', help='Print streamline count 
before and after filtering') p.add_argument('--out_rejected', - help='If specified, save rejected streamlines to this file.') + help='If specified, save rejected streamlines to this ' + 'file.') add_json_args(p) add_reference_arg(p) add_verbose_arg(p) @@ -82,7 +83,7 @@ def main(): sft, args.minL, args.maxL, return_rejected=return_rejected) new_sft = filtered_result[0] - + if return_rejected: rejected_sft = filtered_result[2] save_tractogram(rejected_sft, args.out_rejected, args.no_empty) From a1ac0043b59a159f4561913117e015e9be366cba Mon Sep 17 00:00:00 2001 From: elyz0801 Date: Thu, 12 Dec 2024 11:06:22 -0500 Subject: [PATCH 24/44] create viz plot tool --- scilpy/utils/metrics_tools.py | 79 +----------- scilpy/viz/plot.py | 173 +++++++++++++++++++++++++++ scripts/scil_dti_metrics.py | 76 +----------- scripts/scil_plot_stats_per_point.py | 2 +- 4 files changed, 180 insertions(+), 150 deletions(-) create mode 100644 scilpy/viz/plot.py diff --git a/scilpy/utils/metrics_tools.py b/scilpy/utils/metrics_tools.py index 5fdc5ec22..a8e6787d9 100644 --- a/scilpy/utils/metrics_tools.py +++ b/scilpy/utils/metrics_tools.py @@ -3,8 +3,6 @@ import logging import os -import matplotlib -import matplotlib.pyplot as plt import numpy as np from scilpy.tractanalysis.streamlines_metrics import compute_tract_counts_map @@ -301,79 +299,4 @@ def get_bundle_metrics_mean_std_per_point(streamlines, bundle_name, weights=label_weight)) label_stats['mean'] = float(label_mean) label_stats['std'] = float(label_std) - return stats - - -def plot_metrics_stats(means, stds, title=None, xlabel=None, - ylabel=None, figlabel=None, fill_color=None, - display_means=False): - """ - Plots the mean of a metric along n points with the standard deviation. - - Parameters - ---------- - means: Numpy 1D (or 2D) array of size n - Mean of the metric along n points. - stds: Numpy 1D (or 2D) array of size n - Standard deviation of the metric along n points. - title: string - Title of the figure. - xlabel: string - Label of the X axis. - ylabel: string - Label of the Y axis (suggestion: the metric name). - figlabel: string - Label of the figure (only metadata in the figure object returned). - fill_color: string - Hexadecimal RGB color filling the region between mean ± std. The - hexadecimal RGB color should be formatted as #RRGGBB - display_means: bool - Display the subjects means as semi-transparent line - Return - ------ - The figure object. - """ - matplotlib.style.use('ggplot') - - fig, ax = plt.subplots() - - # Set optional information to the figure, if required. - if title is not None: - ax.set_title(title) - if xlabel is not None: - ax.set_xlabel(xlabel) - if ylabel is not None: - ax.set_ylabel(ylabel) - if figlabel is not None: - fig.set_label(figlabel) - - if means.ndim > 1: - mean = np.average(means, axis=1) - std = np.average(stds, axis=1) - alpha = 0.5 - else: - mean = np.array(means).ravel() - std = np.array(stds).ravel() - alpha = 0.9 - - dim = np.arange(1, len(mean)+1, 1) - - if len(mean) <= 20: - ax.xaxis.set_ticks(dim) - - ax.set_xlim(0, len(mean)+1) - - if means.ndim > 1 and display_means: - for i in range(means.shape[-1]): - ax.plot(dim, means[:, i], color="k", linewidth=1, - solid_capstyle='round', alpha=0.1) - - # Plot the mean line. 
- ax.plot(dim, mean, color="k", linewidth=5, solid_capstyle='round') - - # Plot the std - plt.fill_between(dim, mean - std, mean + std, - facecolor=fill_color, alpha=alpha) - - plt.close(fig) - return fig + return stats \ No newline at end of file diff --git a/scilpy/viz/plot.py b/scilpy/viz/plot.py new file mode 100644 index 000000000..b109dcaf1 --- /dev/null +++ b/scilpy/viz/plot.py @@ -0,0 +1,173 @@ + +import matplotlib +import matplotlib.pyplot as plt +import numpy as np + +def plot_metrics_stats(means, stds, title=None, xlabel=None, + ylabel=None, figlabel=None, fill_color=None, + display_means=False): + """ + Plots the mean of a metric along n points with the standard deviation. + + Parameters + ---------- + means: Numpy 1D (or 2D) array of size n + Mean of the metric along n points. + stds: Numpy 1D (or 2D) array of size n + Standard deviation of the metric along n points. + title: string + Title of the figure. + xlabel: string + Label of the X axis. + ylabel: string + Label of the Y axis (suggestion: the metric name). + figlabel: string + Label of the figure (only metadata in the figure object returned). + fill_color: string + Hexadecimal RGB color filling the region between mean ± std. The + hexadecimal RGB color should be formatted as #RRGGBB + display_means: bool + Display the subjects means as semi-transparent line + Return + ------ + The figure object. + """ + matplotlib.style.use('ggplot') + + fig, ax = plt.subplots() + + # Set optional information to the figure, if required. + if title is not None: + ax.set_title(title) + if xlabel is not None: + ax.set_xlabel(xlabel) + if ylabel is not None: + ax.set_ylabel(ylabel) + if figlabel is not None: + fig.set_label(figlabel) + + if means.ndim > 1: + mean = np.average(means, axis=1) + std = np.average(stds, axis=1) + alpha = 0.5 + else: + mean = np.array(means).ravel() + std = np.array(stds).ravel() + alpha = 0.9 + + dim = np.arange(1, len(mean)+1, 1) + + if len(mean) <= 20: + ax.xaxis.set_ticks(dim) + + ax.set_xlim(0, len(mean)+1) + + if means.ndim > 1 and display_means: + for i in range(means.shape[-1]): + ax.plot(dim, means[:, i], color="k", linewidth=1, + solid_capstyle='round', alpha=0.1) + + # Plot the mean line. + ax.plot(dim, mean, color="k", linewidth=5, solid_capstyle='round') + + # Plot the std + plt.fill_between(dim, mean - std, mean + std, + facecolor=fill_color, alpha=alpha) + + plt.close(fig) + return fig + + +def plot_residuals(data_diff, mask, R_k, q1, q3, iqr, residual_basename): + """ + Plots residual statistics for DWI. + + Parameters + ---------- + data_diff: np.ndarray + The 4D residuals between the DWI and predicted data. + mask : Numpy 3D array or None + Mask array indicating the region of interest for computing residuals. + If None, residuals are computed for the entire dataset. + R_k : Numpy 1D array + Mean residual values for each DWI volume. + q1 : Numpy 1D array + First quartile values for each DWI volume. + q3 : Numpy 1D array + Third quartile values for each DWI volume. + iqr : Numpy 1D array + Interquartile range (Q3 - Q1) for each DWI volume. + residual_basename : string + Basename for saving the output plot file. The file will be saved as + '_residuals_stats.png'. + Returns + ------- + None + + The function generates a plot and saves it as a PNG file. 
+ + """ + # Showing results in graph + # Note that stats will be computed manually and plotted using bxp + # but could be computed using stats = cbook.boxplot_stats + # or pyplot.boxplot(x) + + + # Initializing stats as a List[dict] + stats = [dict.fromkeys(['label', 'mean', 'iqr', 'cilo', 'cihi', + 'whishi', 'whislo', 'fliers', 'q1', + 'med', 'q3'], []) + for _ in range(data_diff.shape[-1])] + + nb_voxels = np.count_nonzero(mask) + percent_outliers = np.zeros(data_diff.shape[-1], dtype=np.float32) + for k in range(data_diff.shape[-1]): + stats[k]['med'] = (q1[k] + q3[k]) / 2 + stats[k]['mean'] = R_k[k] + stats[k]['q1'] = q1[k] + stats[k]['q3'] = q3[k] + stats[k]['whislo'] = q1[k] - 1.5 * iqr[k] + stats[k]['whishi'] = q3[k] + 1.5 * iqr[k] + stats[k]['label'] = k + + # Outliers are observations that fall below Q1 - 1.5(IQR) or + # above Q3 + 1.5(IQR) We check if a voxel is an outlier only if + # we have a mask, else we are biased. + if mask is not None: + x = data_diff[..., k] + outliers = (x < stats[k]['whislo']) | (x > stats[k]['whishi']) + percent_outliers[k] = np.sum(outliers) / nb_voxels * 100 + # What would be our definition of too many outliers? + # Maybe mean(all_means)+-3SD? + # Or we let people choose based on the figure. + # if percent_outliers[k] > ???? : + # logger.warning(' Careful! Diffusion-Weighted Image' + # ' i=%s has %s %% outlier voxels', + # k, percent_outliers[k]) + + if mask is None: + fig, axe = plt.subplots(nrows=1, ncols=1, squeeze=False) + else: + fig, axe = plt.subplots(nrows=1, ncols=2, squeeze=False, + figsize=[10, 4.8]) + # Default is [6.4, 4.8]. Increasing width to see better. + + medianprops = dict(linestyle='-', linewidth=2.5, color='firebrick') + meanprops = dict(linestyle='-', linewidth=2.5, color='green') + axe[0, 0].bxp(stats, showmeans=True, meanline=True, showfliers=False, + medianprops=medianprops, meanprops=meanprops) + axe[0, 0].set_xlabel('DW image') + axe[0, 0].set_ylabel('Residuals per DWI volume. Red is median,\n' + 'green is mean. 
Whiskers are 1.5*interquartile') + axe[0, 0].set_title('Residuals') + axe[0, 0].set_xticks(range(0, q1.shape[0], 5)) + axe[0, 0].set_xticklabels(range(0, q1.shape[0], 5)) + + if mask is not None: + axe[0, 1].plot(range(data_diff.shape[-1]), percent_outliers) + axe[0, 1].set_xticks(range(0, q1.shape[0], 5)) + axe[0, 1].set_xticklabels(range(0, q1.shape[0], 5)) + axe[0, 1].set_xlabel('DW image') + axe[0, 1].set_ylabel('Percentage of outlier voxels') + axe[0, 1].set_title('Outliers') + plt.savefig(residual_basename + '_residuals_stats.png') \ No newline at end of file diff --git a/scripts/scil_dti_metrics.py b/scripts/scil_dti_metrics.py index 27d3074c2..81161b009 100755 --- a/scripts/scil_dti_metrics.py +++ b/scripts/scil_dti_metrics.py @@ -27,7 +27,6 @@ import argparse import logging -import matplotlib.pyplot as plt import nibabel as nib import numpy as np @@ -54,6 +53,7 @@ is_normalized_bvecs, normalize_bvecs) from scilpy.utils.filenames import add_filename_suffix, split_name_with_nii +from scilpy.viz.plot import plot_residuals logger = logging.getLogger("DTI_Metrics") logger.setLevel(logging.INFO) @@ -149,75 +149,6 @@ def _build_arg_parser(): return p -def _plot_residuals(args, data_diff, mask, R_k, q1, q3, iqr, residual_basename): - # Showing results in graph - # Note that stats will be computed manually and plotted using bxp - # but could be computed using stats = cbook.boxplot_stats - # or pyplot.boxplot(x) - if mask is None: - logging.info("Outlier detection will not be performed, since no " - "mask was provided.") - - # Initializing stats as a List[dict] - stats = [dict.fromkeys(['label', 'mean', 'iqr', 'cilo', 'cihi', - 'whishi', 'whislo', 'fliers', 'q1', - 'med', 'q3'], []) - for _ in range(data_diff.shape[-1])] - - nb_voxels = np.count_nonzero(mask) - percent_outliers = np.zeros(data_diff.shape[-1], dtype=np.float32) - for k in range(data_diff.shape[-1]): - stats[k]['med'] = (q1[k] + q3[k]) / 2 - stats[k]['mean'] = R_k[k] - stats[k]['q1'] = q1[k] - stats[k]['q3'] = q3[k] - stats[k]['whislo'] = q1[k] - 1.5 * iqr[k] - stats[k]['whishi'] = q3[k] + 1.5 * iqr[k] - stats[k]['label'] = k - - # Outliers are observations that fall below Q1 - 1.5(IQR) or - # above Q3 + 1.5(IQR) We check if a voxel is an outlier only if - # we have a mask, else we are biased. - if args.mask is not None: - x = data_diff[..., k] - outliers = (x < stats[k]['whislo']) | (x > stats[k]['whishi']) - percent_outliers[k] = np.sum(outliers) / nb_voxels * 100 - # What would be our definition of too many outliers? - # Maybe mean(all_means)+-3SD? - # Or we let people choose based on the figure. - # if percent_outliers[k] > ???? : - # logger.warning(' Careful! Diffusion-Weighted Image' - # ' i=%s has %s %% outlier voxels', - # k, percent_outliers[k]) - - if args.mask is None: - fig, axe = plt.subplots(nrows=1, ncols=1, squeeze=False) - else: - fig, axe = plt.subplots(nrows=1, ncols=2, squeeze=False, - figsize=[10, 4.8]) - # Default is [6.4, 4.8]. Increasing width to see better. - - medianprops = dict(linestyle='-', linewidth=2.5, color='firebrick') - meanprops = dict(linestyle='-', linewidth=2.5, color='green') - axe[0, 0].bxp(stats, showmeans=True, meanline=True, showfliers=False, - medianprops=medianprops, meanprops=meanprops) - axe[0, 0].set_xlabel('DW image') - axe[0, 0].set_ylabel('Residuals per DWI volume. Red is median,\n' - 'green is mean. 
Whiskers are 1.5*interquartile')
-    axe[0, 0].set_title('Residuals')
-    axe[0, 0].set_xticks(range(0, q1.shape[0], 5))
-    axe[0, 0].set_xticklabels(range(0, q1.shape[0], 5))
-
-    if args.mask is not None:
-        axe[0, 1].plot(range(data_diff.shape[-1]), percent_outliers)
-        axe[0, 1].set_xticks(range(0, q1.shape[0], 5))
-        axe[0, 1].set_xticklabels(range(0, q1.shape[0], 5))
-        axe[0, 1].set_xlabel('DW image')
-        axe[0, 1].set_ylabel('Percentage of outlier voxels')
-        axe[0, 1].set_title('Outliers')
-    plt.savefig(residual_basename + '_residuals_stats.png')
-
-
 def main():
     parser = _build_arg_parser()
     args = parser.parse_args()
@@ -404,6 +335,9 @@ def main():
 
     if args.residual:
+        if mask is None:
+            logging.info("Outlier detection will not be performed, since no "
+                         "mask was provided.")
         # Mean residual image
         S0 = np.mean(data[..., gtab.b0s_mask], axis=-1)
         tenfit2_predict = np.zeros(data.shape, dtype=np.float32)
@@ -435,7 +369,7 @@ def main():
         np.save(add_filename_suffix(res_stats_basename,
                                     "_std_residuals"), std)
 
         # Plotting and saving figure
-        _plot_residuals(args, data_diff, mask, R_k, q1, q3, iqr,
+        plot_residuals(data_diff, mask, R_k, q1, q3, iqr,
                        residual_basename)
 
 
diff --git a/scripts/scil_plot_stats_per_point.py b/scripts/scil_plot_stats_per_point.py
index 4f7a6e321..777bce27d 100755
--- a/scripts/scil_plot_stats_per_point.py
+++ b/scripts/scil_plot_stats_per_point.py
@@ -21,7 +21,7 @@
 from scilpy.io.utils import (add_overwrite_arg, assert_inputs_exist,
                              assert_output_dirs_exist_and_empty,
                              add_verbose_arg)
-from scilpy.utils.metrics_tools import plot_metrics_stats
+from scilpy.viz.plot import plot_metrics_stats
 
 
 def _build_arg_parser():
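
For illustration, a hypothetical usage sketch of the relocated helper on
synthetic data. The metric name, color and output file name are invented, and
it assumes a scilpy build where scilpy.viz.plot exists, i.e. with this patch
applied:

    import numpy as np

    from scilpy.viz.plot import plot_metrics_stats

    means = np.random.rand(20, 5)       # 20 points along the bundle, 5 subjects
    stds = 0.1 * np.random.rand(20, 5)

    # With 2D input, the population mean is drawn and, with display_means,
    # each subject's profile appears as a semi-transparent line.
    fig = plot_metrics_stats(means, stds, title='FA profile',
                             xlabel='Point along bundle', ylabel='FA',
                             fill_color='#2D7BB5', display_means=True)
    fig.savefig('fa_profile.png')

From 355eca52a6bc039dda8e9e27128880167e1db1c6 Mon Sep 17 00:00:00 2001
From: AntoineTheb
Date: Thu, 12 Dec 2024 11:12:32 -0500
Subject: [PATCH 25/44] FIX: div by 0 in residual

---
 scripts/scil_dti_metrics.py       | 10 +++++++---
 scripts/tests/test_dti_metrics.py | 28 +++++++++++++++++++++++++++-
 2 files changed, 34 insertions(+), 4 deletions(-)

diff --git a/scripts/scil_dti_metrics.py b/scripts/scil_dti_metrics.py
index 27d3074c2..57c6c20ec 100755
--- a/scripts/scil_dti_metrics.py
+++ b/scripts/scil_dti_metrics.py
@@ -149,7 +149,9 @@ def _build_arg_parser():
     return p
 
 
-def _plot_residuals(args, data_diff, mask, R_k, q1, q3, iqr, residual_basename):
+def _plot_residuals(
+    args, data_diff, mask, R_k, q1, q3, iqr, residual_basename
+):
     # Showing results in graph
     # Note that stats will be computed manually and plotted using bxp
     # but could be computed using stats = cbook.boxplot_stats
     # or pyplot.boxplot(x)
@@ -178,7 +180,7 @@
         # Outliers are observations that fall below Q1 - 1.5(IQR) or
         # above Q3 + 1.5(IQR) We check if a voxel is an outlier only if
         # we have a mask, else we are biased.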
- if args.mask is not None: + if args.mask is not None and nb_voxels > 0: x = data_diff[..., k] outliers = (x < stats[k]['whislo']) | (x > stats[k]['whishi']) percent_outliers[k] = np.sum(outliers) / nb_voxels * 100 @@ -406,6 +408,7 @@ def main(): if args.residual: # Mean residual image S0 = np.mean(data[..., gtab.b0s_mask], axis=-1) + tenfit2_predict = np.zeros(data.shape, dtype=np.float32) for i in range(data.shape[0]): @@ -414,7 +417,8 @@ def main(): else: tenfit2 = tenmodel.fit(data[i, :, :, :]) - tenfit2_predict[i, :, :, :] = tenfit2.predict(gtab, S0[i, :, :]) + S0_i = np.maximum(S0[i, :, :], tenfit2.model.min_signal) + tenfit2_predict[i, :, :, :] = tenfit2.predict(gtab, S0_i) R, data_diff = compute_residuals( predicted_data=tenfit2_predict.astype(np.float32), diff --git a/scripts/tests/test_dti_metrics.py b/scripts/tests/test_dti_metrics.py index 085373f6c..96c002ea5 100644 --- a/scripts/tests/test_dti_metrics.py +++ b/scripts/tests/test_dti_metrics.py @@ -17,7 +17,7 @@ def test_help_option(script_runner): assert ret.success -def test_execution_processing(script_runner, monkeypatch): +def test_execution_processing_diff_metrics(script_runner, monkeypatch): monkeypatch.chdir(os.path.expanduser(tmp_dir.name)) in_dwi = os.path.join(SCILPY_HOME, 'processing', 'dwi_crop_1000.nii.gz') in_bval = os.path.join(SCILPY_HOME, 'processing', '1000.bval') @@ -39,11 +39,37 @@ def test_execution_processing(script_runner, monkeypatch): '--mask', mask_uint8) assert ret.success + +def test_execution_processing_b0_threshold(script_runner, monkeypatch): + monkeypatch.chdir(os.path.expanduser(tmp_dir.name)) + in_dwi = os.path.join(SCILPY_HOME, 'processing', 'dwi_crop_1000.nii.gz') + in_bval = os.path.join(SCILPY_HOME, 'processing', '1000.bval') + in_bvec = os.path.join(SCILPY_HOME, 'processing', '1000.bvec') + + # No mask fitting with this data? Creating our own. + mask = os.path.join(SCILPY_HOME, 'processing', 'ad.nii.gz') + mask_uint8 = os.path.join('mask_uint8.nii.gz') + script_runner.run('scil_volume_math.py', 'convert', + mask, mask_uint8, '--data_type', 'uint8') + ret = script_runner.run('scil_dti_metrics.py', in_dwi, in_bval, in_bvec, '--not_all', '--fa', 'fa.nii.gz', '--b0_threshold', '1', '-f') assert not ret.success + +def test_execution_processing_rgb(script_runner, monkeypatch): + monkeypatch.chdir(os.path.expanduser(tmp_dir.name)) + in_dwi = os.path.join(SCILPY_HOME, 'processing', 'dwi_crop_1000.nii.gz') + in_bval = os.path.join(SCILPY_HOME, 'processing', '1000.bval') + in_bvec = os.path.join(SCILPY_HOME, 'processing', '1000.bvec') + + # No mask fitting with this data? Creating our own. 
+    mask = os.path.join(SCILPY_HOME, 'processing', 'ad.nii.gz')
+    mask_uint8 = os.path.join('mask_uint8.nii.gz')
+    script_runner.run('scil_volume_math.py', 'convert',
+                      mask, mask_uint8, '--data_type', 'uint8')
+
     ret = script_runner.run('scil_dti_metrics.py', in_dwi, in_bval, in_bvec,
                             '--not_all', '--rgb', 'rgb.nii.gz',
                             '-f')

From e60f555d2df2b53ab1978dfc2e733e493a26bfc2 Mon Sep 17 00:00:00 2001
From: Jeremi Levesque
Date: Thu, 12 Dec 2024 11:17:05 -0500
Subject: [PATCH 26/44] fix: add rejection testing scripts

---
 .../tests/test_tractogram_filter_by_length.py | 44 +++++++++++++++++++
 1 file changed, 44 insertions(+)

diff --git a/scripts/tests/test_tractogram_filter_by_length.py b/scripts/tests/test_tractogram_filter_by_length.py
index fdac121ef..f63e2d0bd 100644
--- a/scripts/tests/test_tractogram_filter_by_length.py
+++ b/scripts/tests/test_tractogram_filter_by_length.py
@@ -6,6 +6,7 @@
 from scilpy import SCILPY_HOME
 from scilpy.io.fetcher import fetch_data, get_testing_files_dict
+from scilpy.io.streamlines import load_tractogram
 
 # If they already exist, this only takes 5 seconds (check md5sum)
 fetch_data(get_testing_files_dict(), keys=['filtering.zip'])
@@ -20,9 +21,52 @@ def test_help_option(script_runner):
 
 def test_execution_filtering(script_runner, monkeypatch):
     monkeypatch.chdir(os.path.expanduser(tmp_dir.name))
+    # Effectively, this doesn't filter anything, since bundle_4.trk has
+    # all streamlines with lengths of 130mm. This is just to test the
+    # script execution.
     in_bundle = os.path.join(SCILPY_HOME, 'filtering', 'bundle_4.trk')
 
     ret = script_runner.run('scil_tractogram_filter_by_length.py', in_bundle,
                             'bundle_4_filtered.trk',
                             '--minL', '125', '--maxL', '130')
+
+    sft = load_tractogram('bundle_4_filtered.trk', 'same')
+    assert len(sft) == 52
+
     assert ret.success
+
+
+def test_rejected_filtering(script_runner, monkeypatch):
+    monkeypatch.chdir(os.path.expanduser(tmp_dir.name))
+    in_bundle = os.path.join(SCILPY_HOME, 'filtering',
+                             'bundle_all_1mm.trk')
+    ret = script_runner.run('scil_tractogram_filter_by_length.py',
+                            in_bundle, 'bundle_all_1mm_filtered.trk',
+                            '--minL', '125', '--maxL', '130',
+                            '--out_rejected', 'bundle_all_1mm_rejected.trk')
+    assert ret.success
+    assert os.path.exists('bundle_all_1mm_rejected.trk')
+
+    sft = load_tractogram('bundle_all_1mm_filtered.trk', 'same')
+    rejected_sft = load_tractogram('bundle_all_1mm_rejected.trk', 'same')
+
+    assert len(sft) == 266
+    assert len(rejected_sft) == 2824
+
+
+def test_rejected_filtering_no_rejection(script_runner, monkeypatch):
+    monkeypatch.chdir(os.path.expanduser(tmp_dir.name))
+    in_bundle = os.path.join(SCILPY_HOME, 'filtering',
+                             'bundle_4.trk')
+    ret = script_runner.run('scil_tractogram_filter_by_length.py',
+                            in_bundle, 'bundle_4_filtered.trk',
+                            '--minL', '125', '--maxL', '130',
+                            '--out_rejected', 'bundle_4_rejected.trk')
+    assert ret.success
+
+    # File should be created even though there are no rejected streamlines
+    assert os.path.exists('bundle_4_rejected.trk')
+
+    sft = load_tractogram('bundle_4_filtered.trk', 'same')
+    rejected_sft = load_tractogram('bundle_4_rejected.trk', 'same')
+
+    assert len(sft) == 52
+    assert len(rejected_sft) == 0
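
Sketched with the Python API directly, the behaviour these tests pin down looks
roughly as follows. This assumes the same test bundle as above (the path is a
placeholder) and a scilpy build that includes the --out_rejected changes:

    from dipy.io.streamline import load_tractogram

    from scilpy.tractograms.streamline_operations import \
        filter_streamlines_by_length

    sft = load_tractogram('bundle_all_1mm.trk', 'same')
    kept_sft, kept_ids, rejected_sft = filter_streamlines_by_length(
        sft, min_length=125, max_length=130, return_rejected=True)

    # Every input streamline ends up in exactly one of the two outputs.
    assert len(kept_sft) + len(rejected_sft) == len(sft)

From d78e0d9cdaf70f518feef089134648405de11a67 Mon Sep 17 00:00:00 2001
From: Jeremi Levesque
Date: Thu, 12 Dec 2024 11:22:03 -0500
Subject: [PATCH 27/44] fix: pep8

---
 scripts/tests/test_tractogram_filter_by_length.py | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git 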
a/scripts/tests/test_tractogram_filter_by_length.py b/scripts/tests/test_tractogram_filter_by_length.py index f63e2d0bd..85e94e0f3 100644 --- a/scripts/tests/test_tractogram_filter_by_length.py +++ b/scripts/tests/test_tractogram_filter_by_length.py @@ -29,12 +29,13 @@ def test_execution_filtering(script_runner, monkeypatch): ret = script_runner.run('scil_tractogram_filter_by_length.py', in_bundle, 'bundle_4_filtered.trk', '--minL', '125', '--maxL', '130') - + sft = load_tractogram('bundle_4_filtered.trk', 'same') assert len(sft) == 52 assert ret.success + def test_rejected_filtering(script_runner, monkeypatch): monkeypatch.chdir(os.path.expanduser(tmp_dir.name)) in_bundle = os.path.join(SCILPY_HOME, 'filtering', @@ -49,10 +50,11 @@ def test_rejected_filtering(script_runner, monkeypatch): sft = load_tractogram('bundle_all_1mm_filtered.trk', 'same') rejected_sft = load_tractogram('bundle_all_1mm_rejected.trk', 'same') - + assert len(sft) == 266 assert len(rejected_sft) == 2824 + def test_rejected_filtering_no_rejection(script_runner, monkeypatch): monkeypatch.chdir(os.path.expanduser(tmp_dir.name)) in_bundle = os.path.join(SCILPY_HOME, 'filtering', @@ -62,12 +64,12 @@ def test_rejected_filtering_no_rejection(script_runner, monkeypatch): '--minL', '125', '--maxL', '130', '--out_rejected', 'bundle_4_rejected.trk') assert ret.success - + # File should be created even though there are no rejected streamlines assert os.path.exists('bundle_4_rejected.trk') sft = load_tractogram('bundle_4_filtered.trk', 'same') rejected_sft = load_tractogram('bundle_4_rejected.trk', 'same') - + assert len(sft) == 52 assert len(rejected_sft) == 0 From 68c11e204642d275b5e74ba85aefdd2467e67c81 Mon Sep 17 00:00:00 2001 From: elyz0801 Date: Thu, 12 Dec 2024 11:30:19 -0500 Subject: [PATCH 28/44] pep8 --- scilpy/utils/metrics_tools.py | 2 +- scilpy/viz/plot.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/scilpy/utils/metrics_tools.py b/scilpy/utils/metrics_tools.py index a8e6787d9..31f7f2b85 100644 --- a/scilpy/utils/metrics_tools.py +++ b/scilpy/utils/metrics_tools.py @@ -299,4 +299,4 @@ def get_bundle_metrics_mean_std_per_point(streamlines, bundle_name, weights=label_weight)) label_stats['mean'] = float(label_mean) label_stats['std'] = float(label_std) - return stats \ No newline at end of file + return stats diff --git a/scilpy/viz/plot.py b/scilpy/viz/plot.py index b109dcaf1..d7913619b 100644 --- a/scilpy/viz/plot.py +++ b/scilpy/viz/plot.py @@ -3,6 +3,7 @@ import matplotlib.pyplot as plt import numpy as np + def plot_metrics_stats(means, stds, title=None, xlabel=None, ylabel=None, figlabel=None, fill_color=None, display_means=False): @@ -111,9 +112,8 @@ def plot_residuals(data_diff, mask, R_k, q1, q3, iqr, residual_basename): # Note that stats will be computed manually and plotted using bxp # but could be computed using stats = cbook.boxplot_stats # or pyplot.boxplot(x) - - # Initializing stats as a List[dict] + # Initializing stats as a List[dict] stats = [dict.fromkeys(['label', 'mean', 'iqr', 'cilo', 'cihi', 'whishi', 'whislo', 'fliers', 'q1', 'med', 'q3'], []) @@ -170,4 +170,4 @@ def plot_residuals(data_diff, mask, R_k, q1, q3, iqr, residual_basename): axe[0, 1].set_xlabel('DW image') axe[0, 1].set_ylabel('Percentage of outlier voxels') axe[0, 1].set_title('Outliers') - plt.savefig(residual_basename + '_residuals_stats.png') \ No newline at end of file + plt.savefig(residual_basename + '_residuals_stats.png') From f2de323ad38127318c490f05e9e424d7babee2b0 Mon Sep 17 00:00:00 2001 From: 
Anthony Gagnon Date: Thu, 12 Dec 2024 11:46:02 -0500 Subject: [PATCH 29/44] fix data shape assertion so it recognizes dps --- scilpy/io/hdf5.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scilpy/io/hdf5.py b/scilpy/io/hdf5.py index e31b9dad8..aa3766ae6 100644 --- a/scilpy/io/hdf5.py +++ b/scilpy/io/hdf5.py @@ -105,7 +105,7 @@ def reconstruct_sft_from_hdf5(hdf5_handle, group_keys, space=Space.VOX, for sub_key in hdf5_handle[group_key].keys(): if sub_key not in ['data', 'offsets', 'lengths']: data = hdf5_handle[group_key][sub_key] - if data.shape == hdf5_handle[group_key]['offsets']: + if data.shape == hdf5_handle[group_key]['offsets'].shape: # Discovered dps if load_dps: if i == 0 or not merge_groups: From 2dce9cba415bbace07ef1dd8b949c70a2107a533 Mon Sep 17 00:00:00 2001 From: Jeremi Levesque Date: Thu, 12 Dec 2024 12:57:27 -0500 Subject: [PATCH 30/44] fix: name of file that already exists --- scripts/tests/test_tractogram_filter_by_length.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/tests/test_tractogram_filter_by_length.py b/scripts/tests/test_tractogram_filter_by_length.py index 85e94e0f3..d5f4eda7d 100644 --- a/scripts/tests/test_tractogram_filter_by_length.py +++ b/scripts/tests/test_tractogram_filter_by_length.py @@ -60,7 +60,7 @@ def test_rejected_filtering_no_rejection(script_runner, monkeypatch): in_bundle = os.path.join(SCILPY_HOME, 'filtering', 'bundle_4.trk') ret = script_runner.run('scil_tractogram_filter_by_length.py', - in_bundle, 'bundle_4_filtered.trk', + in_bundle, 'bundle_4_filtered_no_rejection.trk', '--minL', '125', '--maxL', '130', '--out_rejected', 'bundle_4_rejected.trk') assert ret.success @@ -68,7 +68,7 @@ def test_rejected_filtering_no_rejection(script_runner, monkeypatch): # File should be created even though there are no rejected streamlines assert os.path.exists('bundle_4_rejected.trk') - sft = load_tractogram('bundle_4_filtered.trk', 'same') + sft = load_tractogram('bundle_4_filtered_no_rejection.trk', 'same') rejected_sft = load_tractogram('bundle_4_rejected.trk', 'same') assert len(sft) == 52 From f75886a989d3245273e45318b5ee7d3a0c780751 Mon Sep 17 00:00:00 2001 From: EmmaRenauld Date: Thu, 12 Dec 2024 12:10:10 -0500 Subject: [PATCH 31/44] Fix common edges mask --- scripts/scil_connectivity_compute_pca.py | 32 ++++++++++++++++-------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/scripts/scil_connectivity_compute_pca.py b/scripts/scil_connectivity_compute_pca.py index 6ab12ed8e..4ae84ce71 100755 --- a/scripts/scil_connectivity_compute_pca.py +++ b/scripts/scil_connectivity_compute_pca.py @@ -104,6 +104,9 @@ def _build_arg_parser(): p.add_argument('--input_connectoflow', action='store_true', help='If true, script will assume the input folder is a ' 'Connectoflow output.') + p.add_argument('--show', action='store_true', + help="If set, show matplotlib figures. Else, they are " + "only saved in the output folder.") add_verbose_arg(p) add_overwrite_arg(p) @@ -116,8 +119,6 @@ def plot_eigenvalues(pca, principaldf, nb_metrics, out_file): logging.info('Plotting results...') eigenvalues = pca.explained_variance_ pos = list(range(1, nb_metrics + 1)) - plt.clf() - plt.cla() fig = plt.figure() ax = fig.add_subplot(1, 1, 1) @@ -136,8 +137,7 @@ def plot_explained_variance(pca, principaldf, nb_metrics, out_file): # Plot the explained variance. 
explained_var = pca.explained_variance_ratio_ pos = list(range(1, nb_metrics + 1)) - plt.clf() - plt.cla() + fig = plt.figure() ax = fig.add_subplot(1, 1, 1) bar_var = ax.bar(pos, explained_var, align='center', @@ -157,8 +157,7 @@ def plot_contribution(pca, principaldf, metrics, out_excel, out_image): output_component = pd.DataFrame(component, index=principaldf.columns, columns=metrics) output_component.to_excel(out_excel, index=True, header=True) - plt.clf() - plt.cla() + fig, axs = plt.subplots(2) fig.suptitle('Graph of the contribution of each measures to the first ' 'and second principal component.', fontsize=10) @@ -209,8 +208,8 @@ def main(): with open(args.list_ids) as f: subjects = f.read().split() + # Loading all matrices. if args.input_connectoflow: - # Loading all matrix. logging.info('Loading all matrices from a Connectoflow output...') files_per_metric = [[os.path.join( args.in_folder, a, 'Compute_Connectivity', '{}.npy'.format(m)) @@ -230,23 +229,31 @@ def main(): # Setting individual matrix shape. mat_shape = matrices_per_metric[0][0].shape + # Creating input structure if args.all_edges: - # Creating input structure using all edges from all subjects. logging.info('Creating PCA input structure with all edges...') else: logging.info('Creating PCA input structure with common edges...') + nb_subjects = len(matrices_per_metric[0]) - common_edges_mask = np.sum(matrices_per_metric[0]) != 0 + # Using the first metric + subj_masks = [m !=0 for m in matrices_per_metric[0]] # Binary per subj + common_edges_mask = np.sum(subj_masks, axis=0) == nb_subjects # Verifying that other metrics have the same common edges for i, matrices in enumerate(matrices_per_metric[1:]): - tmp_mask = np.sum(matrices) != 0 - if not np.array_equal(common_edges_mask, tmp_mask): + tmp_subj_masks = [m !=0 for m in matrices] # Binary per subj + tmp_common = np.sum(tmp_subj_masks, axis=0) == nb_subjects + if not np.array_equal(common_edges_mask, tmp_common): parser.error("Different binary masks (common edge) have been " "generated from metric {} than other metrics. " "Please validate your input data." .format(args.metrics[i])) + plt.figure() + plt.imshow(common_edges_mask) + plt.title(common_edges_mask) + logging.info('Data shows {} common connections across the population.' 
.format(np.sum(common_edges_mask)))
@@ -304,6 +311,9 @@ def main():
                                     f'{subjects[s]}_PC{i+1}.npy')
                 save_matrix_in_any_format(filename, out[i, s, :, :])
 
+    if args.show:
+        plt.show()
+
 
 if __name__ == "__main__":
     main()
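
A toy check of the new common-edges logic introduced above: an edge survives
only if it is non-zero in every subject's matrix. Self-contained sketch with
two synthetic 3x3 connectivity matrices:

    import numpy as np

    matrices = [np.array([[0, 2, 0], [2, 0, 1], [0, 1, 0]]),
                np.array([[0, 5, 3], [5, 0, 0], [3, 0, 0]])]
    nb_subjects = len(matrices)

    # One binary mask per subject, then keep edges present in all of them.
    subj_masks = [m != 0 for m in matrices]
    common_edges_mask = np.sum(subj_masks, axis=0) == nb_subjects
    print(common_edges_mask.astype(int))
    # [[0 1 0], [1 0 0], [0 0 0]]: only the (0, 1) edge is shared by both.

From 48429f890e7e3d39b53aa8711eaa7cd6fc032675 Mon Sep 17 00:00:00 2001
From: EmmaRenauld
Date: Thu, 12 Dec 2024 13:21:35 -0500
Subject: [PATCH 32/44] Fix pep8

---
 scripts/scil_connectivity_compute_pca.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/scripts/scil_connectivity_compute_pca.py b/scripts/scil_connectivity_compute_pca.py
index 4ae84ce71..670c82ddc 100755
--- a/scripts/scil_connectivity_compute_pca.py
+++ b/scripts/scil_connectivity_compute_pca.py
@@ -237,12 +237,12 @@ def main():
         nb_subjects = len(matrices_per_metric[0])
 
         # Using the first metric
-        subj_masks = [m !=0 for m in matrices_per_metric[0]]  # Mask per subj
+        subj_masks = [m != 0 for m in matrices_per_metric[0]]  # Mask per subj
         common_edges_mask = np.sum(subj_masks, axis=0) == nb_subjects
 
         # Verifying that other metrics have the same common edges
         for i, matrices in enumerate(matrices_per_metric[1:]):
-            tmp_subj_masks = [m !=0 for m in matrices]  # Binary per subj
+            tmp_subj_masks = [m != 0 for m in matrices]  # Binary per subj
             tmp_common = np.sum(tmp_subj_masks, axis=0) == nb_subjects
             if not np.array_equal(common_edges_mask, tmp_common):
                 parser.error("Different binary masks (common edge) have been "

From da8863bd4c4e879eb48c8dadc71b99e1a25e5cbd Mon Sep 17 00:00:00 2001
From: karp2601
Date: Thu, 12 Dec 2024 13:38:53 -0500
Subject: [PATCH 33/44] Fixing bug

---
 scripts/scil_gradients_generate_sampling.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/scil_gradients_generate_sampling.py b/scripts/scil_gradients_generate_sampling.py
index 850188fca..12b1a15f8 100755
--- a/scripts/scil_gradients_generate_sampling.py
+++ b/scripts/scil_gradients_generate_sampling.py
@@ -62,7 +62,7 @@ def _build_arg_parser():
                    "Default if you add no option is to have a b0 "
                    "at the start.")
     gg = g.add_mutually_exclusive_group()
-    gg.add_argument('--no_b0_start',
+    gg.add_argument('--no_b0_start', action='store_true',
                     help="If set, do not add a b0 at the beginning. ")
     gg.add_argument('--b0_every', type=int,
                     help='Interleave a b0 every n=b0_every values. Starts '

From 845a34779d7c61fd02abc2741abb62d2309804de Mon Sep 17 00:00:00 2001
From: AlexVCaron
Date: Thu, 12 Dec 2024 13:53:45 -0500
Subject: [PATCH 34/44] [VIS] Fury upcoming deprecated call

Fixes #1059
---
 scilpy/io/fetcher.py                        | 3 +--
 scilpy/viz/backends/fury.py                 | 2 +-
 scripts/tests/test_viz_volume_screenshot.py | 3 ++-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/scilpy/io/fetcher.py b/scilpy/io/fetcher.py
index 6876d6e6e..945a1e2fb 100644
--- a/scilpy/io/fetcher.py
+++ b/scilpy/io/fetcher.py
@@ -5,9 +5,8 @@
 import logging
 import os
 import pathlib
-import zipfile
-
 import requests
+import zipfile
 
 from scilpy import SCILPY_HOME
 
diff --git a/scilpy/viz/backends/fury.py b/scilpy/viz/backends/fury.py
index 3940dacfb..ab930cb6d 100644
--- a/scilpy/viz/backends/fury.py
+++ b/scilpy/viz/backends/fury.py
@@ -148,7 +148,7 @@ def set_viewport(scene, orientation, slice_index, volume_shape, aspect_ratio):
         Ratio between viewport's width and height.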
""" - scene.projection('parallel') + scene.projection(proj_type='parallel') camera = initialize_camera( orientation, slice_index, volume_shape, aspect_ratio) scene.set_camera(position=camera[CamParams.VIEW_POS], diff --git a/scripts/tests/test_viz_volume_screenshot.py b/scripts/tests/test_viz_volume_screenshot.py index 1e4db3509..12cb79aab 100644 --- a/scripts/tests/test_viz_volume_screenshot.py +++ b/scripts/tests/test_viz_volume_screenshot.py @@ -12,7 +12,8 @@ tmp_dir = tempfile.TemporaryDirectory() -def test_screenshot(script_runner): +def test_screenshot(script_runner, monkeypatch): + monkeypatch.chdir(os.path.expanduser(tmp_dir.name)) in_fa = os.path.join(SCILPY_HOME, 'bst', 'fa.nii.gz') ret = script_runner.run("scil_viz_volume_screenshot.py", in_fa, 'fa.png') From c0438228b971e5f401c6a59a92b235e3065ac19f Mon Sep 17 00:00:00 2001 From: karp2601 Date: Thu, 12 Dec 2024 13:59:09 -0500 Subject: [PATCH 35/44] Small update of read_bvals_bvecs --- scripts/scil_viz_gradients_screenshot.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/scripts/scil_viz_gradients_screenshot.py b/scripts/scil_viz_gradients_screenshot.py index 64d9ac7da..4ed7357ab 100755 --- a/scripts/scil_viz_gradients_screenshot.py +++ b/scripts/scil_viz_gradients_screenshot.py @@ -12,6 +12,7 @@ import os from dipy.data import get_sphere +from dipy.io.gradients import read_bvals_bvecs from scilpy.gradients.bvec_bval_tools import identify_shells from scilpy.io.utils import (add_overwrite_arg, @@ -120,11 +121,8 @@ def main(): if len(args.in_gradient_scheme) == 2: in_gradient_schemes = args.in_gradient_scheme in_gradient_schemes.sort() # [bval, bvec] - # bvecs/bvals (FSL) format, X Y Z AND b (or transpose) - points = np.genfromtxt(in_gradient_schemes[1]) - if points.shape[0] == 3: - points = points.T - bvals = np.genfromtxt(in_gradient_schemes[0]) + bvals, points = read_bvals_bvecs(in_gradient_schemes[0], + in_gradient_schemes[1]) centroids, shell_idx = identify_shells(bvals) else: # MRtrix format X, Y, Z, b From b9de36d0116ba42a1b9f468b40e74d47342b9418 Mon Sep 17 00:00:00 2001 From: Gabriel Girard Date: Thu, 12 Dec 2024 15:24:25 -0500 Subject: [PATCH 36/44] DOC - fixed updated script name --- docs/source/documentation/construct_participants_tsv_file.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/documentation/construct_participants_tsv_file.rst b/docs/source/documentation/construct_participants_tsv_file.rst index 1ca030270..921085021 100644 --- a/docs/source/documentation/construct_participants_tsv_file.rst +++ b/docs/source/documentation/construct_participants_tsv_file.rst @@ -1,4 +1,4 @@ -Instructions to write the tsv files "participants.tsv" for the script scil_group_comparison.py +Instructions to write the tsv files "participants.tsv" for the script scil_stats_group_comparison.py =============================================================================================== The TSV file should follow the BIDS `specification `_. @@ -12,7 +12,7 @@ participant_id categorical_var_1 categorical_var_2 ... 
(ex: participant_id sex nb_children) -The categorical variable name are the "group_by" variable that can be called by scil_group_comparison.py +The categorical variable name are the "group_by" variable that can be called by scil_stats_group_comparison.py Specific row ------------ From 4011f60be59362ff1b27be85f252d4108887e33a Mon Sep 17 00:00:00 2001 From: VincentBeaud Date: Fri, 13 Dec 2024 13:01:29 -0500 Subject: [PATCH 37/44] Bit of cleaning --- scilpy/tracking/seed.py | 3 +-- scilpy/tracking/utils.py | 5 +++-- scripts/scil_tracking_local_dev.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/scilpy/tracking/seed.py b/scilpy/tracking/seed.py index 8f869548c..daa1cb199 100644 --- a/scilpy/tracking/seed.py +++ b/scilpy/tracking/seed.py @@ -371,7 +371,7 @@ class CustomSeedsDispenser(SeedGenerator): Adaptation of the scilpy.tracking.seed.SeedGenerator interface for using already generated, custom seeds. """ - def __init__(self, custom_seeds, voxres, space=Space('vox'), + def __init__(self, custom_seeds, space=Space('vox'), origin=Origin('center')): """ Parameters @@ -381,7 +381,6 @@ def __init__(self, custom_seeds, voxres, space=Space('vox'), voxres: np.ndarray(3,) The pixel resolution, ex, using img.header.get_zooms()[:3]. """ - self.voxres = voxres self.origin = origin self.space = space self.seeds = custom_seeds diff --git a/scilpy/tracking/utils.py b/scilpy/tracking/utils.py index 15ffd2e5a..853ebd4bc 100644 --- a/scilpy/tracking/utils.py +++ b/scilpy/tracking/utils.py @@ -129,8 +129,9 @@ def add_seeding_options(p): seed_sub_exclusive.add_argument( '--in_custom_seeds', type=str, help='Path to a file containing a list of custom seeding \n' - 'coordinates (.txt, .mat or .npy). They should be in' - 'voxel space.') + 'coordinates (.txt, .mat or .npy). They should be in \n' + 'voxel space. 
In the case of a text file, each line should \n' + 'contain a single seed, written in the format: [x, y, z].') def add_out_options(p): diff --git a/scripts/scil_tracking_local_dev.py b/scripts/scil_tracking_local_dev.py index 889655558..d250c6f55 100755 --- a/scripts/scil_tracking_local_dev.py +++ b/scripts/scil_tracking_local_dev.py @@ -206,7 +206,7 @@ def main(): seed_res = seed_img.header.get_zooms()[:3] if args.in_custom_seeds: seeds = np.squeeze(load_matrix_in_any_format(args.in_custom_seeds)) - seed_generator = CustomSeedsDispenser(seeds, seed_res, space=our_space, + seed_generator = CustomSeedsDispenser(seeds, space=our_space, origin=our_origin) nbr_seeds = len(seeds) else: From 575b0617d676d2c49a97bd895e96466017177808 Mon Sep 17 00:00:00 2001 From: VincentBeaud Date: Fri, 13 Dec 2024 13:23:32 -0500 Subject: [PATCH 38/44] typo --- scilpy/tracking/seed.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scilpy/tracking/seed.py b/scilpy/tracking/seed.py index daa1cb199..d8dc2887c 100644 --- a/scilpy/tracking/seed.py +++ b/scilpy/tracking/seed.py @@ -384,7 +384,7 @@ def __init__(self, custom_seeds, space=Space('vox'), self.origin = origin self.space = space self.seeds = custom_seeds - self.count = 0 + self.i = 0 def init_generator(self, rng_seed, numbers_to_skip): """ From 5d09d49d0e5e0de0503c076c738e257587b892c9 Mon Sep 17 00:00:00 2001 From: VincentBeaud Date: Mon, 16 Dec 2024 11:02:29 -0500 Subject: [PATCH 39/44] Fix doc --- scilpy/tracking/seed.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/scilpy/tracking/seed.py b/scilpy/tracking/seed.py index d8dc2887c..6fa1247a0 100644 --- a/scilpy/tracking/seed.py +++ b/scilpy/tracking/seed.py @@ -374,12 +374,19 @@ class CustomSeedsDispenser(SeedGenerator): def __init__(self, custom_seeds, space=Space('vox'), origin=Origin('center')): """ + Custom seeds need to be in the same space and origin as the ODFs used + for tracking. + Parameters ---------- custom_seeds: list Custom seeding coordinates. - voxres: np.ndarray(3,) - The pixel resolution, ex, using img.header.get_zooms()[:3]. + space: Space (optional) + The Dipy space in which the seeds were saved. + Default: Space.Vox or 'vox' + origin: Origin (optional) + The Dipy origin in which the seeds were saved. 
+ Default: Origin.NIFTI or 'center' """ self.origin = origin self.space = space From 9ef0c813a1f5ad70ad101425da84906b8c9b0c46 Mon Sep 17 00:00:00 2001 From: Alex Valcourt Caron Date: Mon, 16 Dec 2024 13:20:24 -0500 Subject: [PATCH 40/44] Update test.yml --- .github/workflows/test.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 42c99cb2f..a3f4e682c 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -7,6 +7,7 @@ on: pull_request: branches: - master + merge_group: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} From 7302a3643fab8c772bf726e38d5f67e85817161d Mon Sep 17 00:00:00 2001 From: Arnaud Bore Date: Mon, 16 Dec 2024 15:15:06 -0500 Subject: [PATCH 41/44] Update test.yml change runner for coverage --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index a3f4e682c..d5afe5c89 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -71,7 +71,7 @@ jobs: .test_reports/ coverage: - runs-on: scilus-runners + runs-on: ubuntu-latest if: github.repository == 'scilus/scilpy' needs: test From 3d832a511f4ddc9368a06e79483bc3374b6c727a Mon Sep 17 00:00:00 2001 From: karp2601 Date: Mon, 16 Dec 2024 15:13:00 -0500 Subject: [PATCH 42/44] Fixing function bug --- scripts/scil_viz_gradients_screenshot.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/scil_viz_gradients_screenshot.py b/scripts/scil_viz_gradients_screenshot.py index 4ed7357ab..128c5bf21 100755 --- a/scripts/scil_viz_gradients_screenshot.py +++ b/scripts/scil_viz_gradients_screenshot.py @@ -89,7 +89,7 @@ def main(): if len(args.in_gradient_scheme) == 2: assert_gradients_filenames_valid(parser, args.in_gradient_scheme, - 'fsl') + True) elif len(args.in_gradient_scheme) == 1: basename, ext = os.path.splitext(args.in_gradient_scheme[0]) if ext in ['.bvec', '.bvecs', '.bvals', '.bval']: @@ -98,7 +98,7 @@ def main(): else: assert_gradients_filenames_valid(parser, args.in_gradient_scheme, - 'mrtrix') + False) else: parser.error('Depending on the gradient format you should have ' 'two files for FSL format and one file for MRtrix') From 7a6707fcabe934722de3f13f19706b296d29ff64 Mon Sep 17 00:00:00 2001 From: EmmaRenauld Date: Tue, 17 Dec 2024 13:40:39 -0500 Subject: [PATCH 43/44] Fix title --- scripts/scil_connectivity_compute_pca.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/scil_connectivity_compute_pca.py b/scripts/scil_connectivity_compute_pca.py index 670c82ddc..56b09541b 100755 --- a/scripts/scil_connectivity_compute_pca.py +++ b/scripts/scil_connectivity_compute_pca.py @@ -252,7 +252,7 @@ def main(): plt.figure() plt.imshow(common_edges_mask) - plt.title(common_edges_mask) + plt.title("Common edges mask") logging.info('Data shows {} common connections across the population.' 
.format(np.sum(common_edges_mask))) From 92fbb646799162deab66783eb3c17b956aa56f60 Mon Sep 17 00:00:00 2001 From: Philippe Karan Date: Tue, 17 Dec 2024 15:05:04 -0500 Subject: [PATCH 44/44] Points to bvecs --- scripts/scil_viz_gradients_screenshot.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/scil_viz_gradients_screenshot.py b/scripts/scil_viz_gradients_screenshot.py index 128c5bf21..0eed8a728 100755 --- a/scripts/scil_viz_gradients_screenshot.py +++ b/scripts/scil_viz_gradients_screenshot.py @@ -121,14 +121,14 @@ def main(): if len(args.in_gradient_scheme) == 2: in_gradient_schemes = args.in_gradient_scheme in_gradient_schemes.sort() # [bval, bvec] - bvals, points = read_bvals_bvecs(in_gradient_schemes[0], - in_gradient_schemes[1]) + bvals, bvecs = read_bvals_bvecs(in_gradient_schemes[0], + in_gradient_schemes[1]) centroids, shell_idx = identify_shells(bvals) else: # MRtrix format X, Y, Z, b in_gradient_scheme = args.in_gradient_scheme[0] tmp = np.genfromtxt(in_gradient_scheme, delimiter=' ') - points = tmp[:, :3] + bvecs = tmp[:, :3] bvals = tmp[:, 3] centroids, shell_idx = identify_shells(bvals) @@ -159,7 +159,7 @@ def main(): for idx, val in enumerate(shell_idx): if val != 0 and val != -1: shell_idx[idx] -= len(np.where(indexes < val)[0]) - ms = build_ms_from_shell_idx(points, shell_idx) + ms = build_ms_from_shell_idx(bvecs, shell_idx) else: ms = [get_sphere(args.dipy_sphere).vertices]
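
As a closing illustration of the renamed variables, a rough sketch of the
FSL-branch flow in scil_viz_gradients_screenshot.py after this series. File
names are placeholders, and dipy and scilpy are assumed to be importable:

    from dipy.io.gradients import read_bvals_bvecs

    from scilpy.gradients.bvec_bval_tools import identify_shells

    # Any FSL-style bval/bvec pair works here.
    bvals, bvecs = read_bvals_bvecs('dwi.bval', 'dwi.bvec')
    centroids, shell_idx = identify_shells(bvals)

    # shell_idx maps every direction in bvecs (N x 3) to one entry of
    # centroids, which is what build_ms_from_shell_idx consumes.
    print(centroids, bvecs.shape, shell_idx.shape)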