run.py
#!/usr/bin/env python3
import argparse
import os
import subprocess
import nibabel
import numpy
from glob import glob

__version__ = open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                'version')).read()


def run(command, env={}):
    # Merge any extra environment variables into a copy of os.environ so that
    # per-call overrides do not leak back into the parent process environment.
    merged_env = os.environ.copy()
    merged_env.update(env)
    process = subprocess.Popen(command, stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT, shell=True,
                               env=merged_env)
    # Stream the command's output line by line until it exits.
    while True:
        line = process.stdout.readline()
        line = str(line, 'utf-8')[:-1]
        print(line)
        if line == '' and process.poll() is not None:
            break
    if process.returncode != 0:
        raise Exception("Non-zero return code: %d" % process.returncode)


parser = argparse.ArgumentParser(description='Example BIDS App entrypoint script.')
parser.add_argument('bids_dir', help='The directory with the input dataset '
                    'formatted according to the BIDS standard.')
parser.add_argument('output_dir', help='The directory where the output files '
                    'should be stored. If you are running group level analysis '
                    'this folder should be prepopulated with the results of the '
                    'participant level analysis.')
parser.add_argument('analysis_level', help='Level of the analysis that will be performed. '
                    'Multiple participant level analyses can be run independently '
                    '(in parallel) using the same output_dir.',
                    choices=['participant', 'group'])
parser.add_argument('--participant_label', help='The label(s) of the participant(s) that should be analyzed. The label '
                    'corresponds to sub-<participant_label> from the BIDS spec '
                    '(so it does not include "sub-"). If this parameter is not '
                    'provided all subjects should be analyzed. Multiple '
                    'participants can be specified with a space separated list.',
                    nargs="+")
parser.add_argument('--skip_bids_validator', help='Skip the BIDS dataset validation step.',
                    action='store_true')
parser.add_argument(
    '-v',
    '--version',
    action='version',
    version=f'BIDS-App example version {__version__}',
)

args = parser.parse_args()

if not args.skip_bids_validator:
    run(f'bids-validator {args.bids_dir}')

subjects_to_analyze = []
# only for a subset of subjects
if args.participant_label:
    subjects_to_analyze = args.participant_label
# for all subjects
else:
    subject_dirs = glob(os.path.join(args.bids_dir, "sub-*"))
    subjects_to_analyze = [subject_dir.split("-")[-1] for subject_dir in subject_dirs]

# running participant level
if args.analysis_level == "participant":

    # find all T1s and skullstrip them
    for subject_label in subjects_to_analyze:
        for T1_file in (glob(os.path.join(args.bids_dir, f"sub-{subject_label}", "anat", "*_T1w.nii*"))
                        + glob(os.path.join(args.bids_dir, f"sub-{subject_label}", "ses-*", "anat", "*_T1w.nii*"))):
            out_file = os.path.split(T1_file)[-1].replace("_T1w.", "_brain.")
            cmd = f"bet {T1_file} {os.path.join(args.output_dir, out_file)}"
            print(cmd)
            run(cmd)

# running group level
elif args.analysis_level == "group":
    brain_sizes = []
    for subject_label in subjects_to_analyze:
        for brain_file in glob(os.path.join(args.output_dir, f"sub-{subject_label}*.nii*")):
            data = nibabel.load(brain_file).get_fdata()
            # count the non-zero voxels in each skullstripped brain mask
            brain_sizes.append((data != 0).sum())

    with open(os.path.join(args.output_dir, "avg_brain_size.txt"), 'w') as fp:
        fp.write("Average brain size is %g voxels" % numpy.array(brain_sizes).mean())
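
# Example invocations, following the argparse definitions above; the /data
# paths below are placeholders for the actual dataset and output locations:
#
#   python run.py /data/bids_dataset /data/outputs participant --participant_label 01 02
#   python run.py /data/bids_dataset /data/outputs group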