From 5a8cf62412d3e3474d4239a2b9f662268c2f96d9 Mon Sep 17 00:00:00 2001 From: IsraMekki0 Date: Fri, 27 Jan 2023 12:49:23 +0100 Subject: [PATCH 01/10] merged brats-toolkit with source and added pyproject.toml --- LICENSE | 2 +- brats_toolkit/__init__.py | 1 + brats_toolkit/_version.py | 4 + brats_toolkit/cli.py | 214 +++++++++ brats_toolkit/config/cpu_dockers.json | 29 ++ brats_toolkit/config/dockers.json | 167 +++++++ brats_toolkit/config/dockers_demo.json | 20 + brats_toolkit/config/fileformats.json | 38 ++ brats_toolkit/config/gpu_dockers.json | 47 ++ brats_toolkit/config/survival_dockers.json | 58 +++ brats_toolkit/fusionator.py | 454 ++++++++++++++++++ brats_toolkit/preprocessor.py | 140 ++++++ brats_toolkit/segmentor.py | 371 ++++++++++++++ brats_toolkit/util/__init__.py | 1 + .../util/backend_scripts/unix_docker.sh | 29 ++ .../util/backend_scripts/unix_docker_gpu.sh | 24 + .../util/backend_scripts/win_docker.cmd | 11 + brats_toolkit/util/docker_functions.py | 93 ++++ brats_toolkit/util/filemanager.py | 230 +++++++++ brats_toolkit/util/own_itk.py | 270 +++++++++++ brats_toolkit/util/prep_utils.py | 14 + pyproject.toml | 58 +++ references.bib | 109 +++++ 23 files changed, 2383 insertions(+), 1 deletion(-) create mode 100644 brats_toolkit/__init__.py create mode 100644 brats_toolkit/_version.py create mode 100644 brats_toolkit/cli.py create mode 100644 brats_toolkit/config/cpu_dockers.json create mode 100644 brats_toolkit/config/dockers.json create mode 100644 brats_toolkit/config/dockers_demo.json create mode 100644 brats_toolkit/config/fileformats.json create mode 100644 brats_toolkit/config/gpu_dockers.json create mode 100644 brats_toolkit/config/survival_dockers.json create mode 100644 brats_toolkit/fusionator.py create mode 100644 brats_toolkit/preprocessor.py create mode 100644 brats_toolkit/segmentor.py create mode 100755 brats_toolkit/util/__init__.py create mode 100755 brats_toolkit/util/backend_scripts/unix_docker.sh create mode 100755 brats_toolkit/util/backend_scripts/unix_docker_gpu.sh create mode 100755 brats_toolkit/util/backend_scripts/win_docker.cmd create mode 100644 brats_toolkit/util/docker_functions.py create mode 100755 brats_toolkit/util/filemanager.py create mode 100755 brats_toolkit/util/own_itk.py create mode 100644 brats_toolkit/util/prep_utils.py create mode 100644 pyproject.toml create mode 100644 references.bib diff --git a/LICENSE b/LICENSE index 42c2762..0b27431 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2019 Florian Kofler (florian.kofler[at]tum.de) & Christoph Berger (c.berger[at]tum.de) +Copyright (c) 2019 Florian Kofler (florian.kofler[at]tum.de), Christoph Berger (c.berger[at]tum.de), Isra Mekki (isra.mekki[at]helmholtz-muenchen.de) & Mahyar Valizadeh (mahyar.valizadeh[at]helmholtz-muenchen.de) GNU AFFERO GENERAL PUBLIC LICENSE Version 3, 19 November 2007 diff --git a/brats_toolkit/__init__.py b/brats_toolkit/__init__.py new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/brats_toolkit/__init__.py @@ -0,0 +1 @@ + diff --git a/brats_toolkit/_version.py b/brats_toolkit/_version.py new file mode 100644 index 0000000..2337850 --- /dev/null +++ b/brats_toolkit/_version.py @@ -0,0 +1,4 @@ +# file generated by setuptools_scm +# don't change, don't track in version control +__version__ = version = '0.1.dev37+gbee34b1.d20230127' +__version_tuple__ = version_tuple = (0, 1, 'dev37', 'gbee34b1.d20230127') diff --git a/brats_toolkit/cli.py b/brats_toolkit/cli.py new file mode 100644 index 0000000..76cb687 --- /dev/null +++ 
b/brats_toolkit/cli.py @@ -0,0 +1,214 @@ +# -*- coding: utf-8 -*- +# Author: Christoph Berger +# Script for evaluation and bulk segmentation of Brain Tumor Scans +# using the MICCAI BRATS algorithmic repository +# +# Please refer to README.md and LICENSE.md for further documentation +# This software is not certified for clinical use. + +import sys +import subprocess +import pprint +import argparse + +from . import segmentor, fusionator, preprocessor + +def list_dockers(): + seg = segmentor.Segmentor() + pp = pprint.PrettyPrinter(indent=4) + pp.pprint(seg.config) + +def list_docker_ids(): + seg = segmentor.Segmentor() + pp = pprint.PrettyPrinter(indent=4) + pp.pprint(seg.config.keys()) + +def list_docker_gpu(): + seg = segmentor.Segmentor() + print('all these images support GPU computations:') + for id in seg.config.keys(): + if seg.config[id]['runtime'] == 'nvidia': + print(id) + +def list_docker_cpu(): + seg = segmentor.Segmentor() + print('all these images support CPU computations:') + for id in seg.config.keys(): + if seg.config[id]['runtime'] == 'runc': + print(id) + +def fusion(): + parser = argparse.ArgumentParser(description='Runs the Docker orchestra to fuse segmentations. All inputs must have equal shape and identical label values.') + parser.add_argument('-i','--input', required=True, + help = 'Input directory containing all .nii.gz files to be fused') + parser.add_argument('-m','--method', required=True, + help = 'Method for fusion: mav for majority voting, simple for SIMPLE') + parser.add_argument('-o', '--output', + help = 'Filename for the output in format filename.nii.gz') + parser.add_argument('-v', '--verbose',action='store_true', + help = 'Verbose mode outputs log info to the command line.') + try: + args = parser.parse_args() + except SystemExit as e: + if e.code == 2: + parser.print_help() + sys.exit(e.code) + try: + # runs the fusion with all the settings wished for by the user + fus = fusionator.Fusionator(verbose=args.verbose) + fus.dirFuse(args.input, method=args.method, outputPath=args.output) + except subprocess.CalledProcessError as e: + # Ignoring errors happening in the Docker Process, otherwise we'd e.g. get error messages on exiting the Docker via CTRL+D. + pass + except Exception as e: + print('ERROR DETAIL: ', e) + +def segmentation(): + parser = argparse.ArgumentParser(description='Runs the Docker orchestra to segment and fuse segmentations based on the ' \ 'BraTS algorithmic repository. ' \ 'Please keep in mind that some models require Nvidia-Docker to run as' \ ' they need a supported GPU.') + parser.add_argument('-l', '--list', + help = 'List all models available for segmentation.', + action = 'store_true') + parser.add_argument('-ll', '--longlist', + help = 'List all models available for segmentation with details.', + action = 'store_true') + parser.add_argument('-lc', '--cpulist', + help = 'List all models supporting cpus.', + action = 'store_true') + parser.add_argument('-lg', '--gpulist', + help = 'List all models supporting gpus.', + action = 'store_true') + parser.add_argument('-t1',required=True, + help = 'Path to the t1 modality.') + parser.add_argument('-t1c',required=True, + help = 'Path to the t1c modality.') + parser.add_argument('-t2',required=True, + help = 'Path to the t2 modality.') + parser.add_argument('-fla',required=True, + help = 'Path to the fla modality.') + parser.add_argument('-d', '--docker', required=True, + help = 'Container ID or method used for fusion. (mav, simple, all). 
Run brats-orchestra --list to display all options.') + parser.add_argument('-o', '--output',required=True, + help = 'Path to the desired output file.') + parser.add_argument('-v', '--verbose',action='store_true', + help = 'Verbose mode outputs log info to the command line.') + parser.add_argument('-c', '--config', + help = 'Add a path to a custom config file for dockers here.') + parser.add_argument('-g', '--gpu', action='store_true', + help = 'Pass this flag if your Docker version already supports the --gpus flag.') + parser.add_argument('-gi', '--gpuid', + help = 'Specify the GPU bus ID to be used.') + try: + if '-l' in sys.argv[1:] or '--list' in sys.argv[1:]: + list_docker_ids() + sys.exit(0) + elif '-ll' in sys.argv[1:] or '--longlist' in sys.argv[1:]: + list_dockers() + sys.exit(0) + elif '-lg' in sys.argv[1:] or '--gpulist' in sys.argv[1:]: + list_docker_gpu() + sys.exit(0) + elif '-lc' in sys.argv[1:] or '--cpulist' in sys.argv[1:]: + list_docker_cpu() + sys.exit(0) + else: + args = parser.parse_args() + except SystemExit as e: + if e.code == 2: + parser.print_help() + sys.exit(e.code) + try: + # runs the segmentation with all the settings wished for by the user + seg = segmentor.Segmentor(config=args.config, verbose=args.verbose, newdocker=args.gpu, gpu=str(args.gpuid) if args.gpuid else '0') + seg.segment(t1=args.t1, t1c=args.t1c, t2=args.t2, fla=args.fla, cid=args.docker, outputPath=args.output) + except subprocess.CalledProcessError as e: + # Ignoring errors happening in the Docker Process, otherwise we'd e.g. get error messages on exiting the Docker via CTRL+D. + pass + except Exception as e: + print('ERROR DETAIL: ', e) + +def batchpreprocess(): + parser = argparse.ArgumentParser(description='Runs the preprocessing for MRI scans on a folder of images.') + parser.add_argument('-i', '--input', required=True, help='The input directory with 4 modalities in unprocessed Nifti format.') + parser.add_argument('-o', '--output',required=True, + help = 'Path to the desired output directory.') + parser.add_argument('-s', '--skipupdate',action='store_true', + help = 'If passed, the backend will not be updated.') + parser.add_argument('-c', '--confirm', action='store_true', + help = 'If passed, the container will ask for confirmation') + parser.add_argument('-g', '--gpu', action='store_true', + help = 'Pass this flag if you want to use GPU computations.') + parser.add_argument('-gi', '--gpuid', + help = 'Specify the GPU bus ID to be used.') + try: + args = parser.parse_args() + except SystemExit as e: + if e.code == 2: + parser.print_help() + sys.exit(e.code) + try: + # runs the preprocessing with all the settings wished for by the user + pre = preprocessor.Preprocessor() + if args.gpu: + mode = "gpu" + else: + mode = "cpu" + if args.gpuid: + gpuid = str(args.gpuid) + else: + gpuid = '0' + pre.batch_preprocess(exam_import_folder=args.input, exam_export_folder=args.output, mode=mode, confirm=args.confirm, skipUpdate=args.skipupdate, gpuid=gpuid) + except subprocess.CalledProcessError as e: + # Ignoring errors happening in the Docker Process, otherwise we'd e.g. get error messages on exiting the Docker via CTRL+D. 
+ pass + except Exception as e: + print('ERROR DETAIL: ', e) + +def singlepreprocess(): + parser = argparse.ArgumentParser(description='Runs the preprocessing for MRI scans on a single set of images.') + parser.add_argument('-t1',required=True, + help = 'Path to the t1 modality.') + parser.add_argument('-t1c',required=True, + help = 'Path to the t1c modality.') + parser.add_argument('-t2',required=True, + help = 'Path to the t2 modality.') + parser.add_argument('-fla',required=True, + help = 'Path to the fla modality.') + parser.add_argument('-o', '--output',required=True, + help = 'Path to the desired output directory.') + parser.add_argument('-s', '--skipupdate',action='store_true', + help = 'If passed, the backend will not be updated.') + parser.add_argument('-c', '--confirm', action='store_true', + help = 'If passed, the container will ask for confirmation') + parser.add_argument('-g', '--gpu', action='store_true', + help = 'Pass this flag if you want to use GPU computations.') + parser.add_argument('-gi', '--gpuid', + help = 'Specify the GPU bus ID to be used.') + try: + args = parser.parse_args() + except SystemExit as e: + if e.code == 2: + parser.print_help() + sys.exit(e.code) + try: + # runs the preprocessing with all the settings wished for by the user + pre = preprocessor.Preprocessor() + if args.gpu: + mode = "gpu" + else: + mode = "cpu" + if args.gpuid: + gpuid = str(args.gpuid) + else: + gpuid = '0' + pre.single_preprocess(t1File=args.t1, t1cFile=args.t1c, t2File=args.t2, flaFile=args.fla, outputFolder=args.output, mode=mode, confirm=args.confirm, skipUpdate=args.skipupdate, gpuid=gpuid) + except subprocess.CalledProcessError as e: + # Ignoring errors happening in the Docker Process, otherwise we'd e.g. get error messages on exiting the Docker via CTRL+D. 
+ pass + except Exception as e: + print('ERROR DETAIL: ', e) + +if __name__ == '__main__': + segmentation() diff --git a/brats_toolkit/config/cpu_dockers.json b/brats_toolkit/config/cpu_dockers.json new file mode 100644 index 0000000..c7079a8 --- /dev/null +++ b/brats_toolkit/config/cpu_dockers.json @@ -0,0 +1,29 @@ +{ + "econib":{ + "name":"econib", + "author":"Michal Marcinkiewicz", + "fileformat":"gz-b17", + "runtime":"runc", + "id":"econib/brats-2018", + "command":" ", + "mountpoint":"/data" + }, + "lfb_rwth":{ + "name":"lfb_rwth", + "author":"Leon Weninger", + "fileformat":"gz-b17-t1", + "runtime":"runc", + "id":"leonweninger/brats18_segmentation", + "command":" ", + "mountpoint":"/data" + }, + "gbmnet":{ + "name":"gbmnet", + "author":"Nicholas Nuechterlein", + "fileformat":"gz-b17-t1", + "runtime":"runc", + "id":"nknuecht/gbmnet18", + "command":"python evaluate.py", + "mountpoint":"/data" + } +} diff --git a/brats_toolkit/config/dockers.json b/brats_toolkit/config/dockers.json new file mode 100644 index 0000000..e17a230 --- /dev/null +++ b/brats_toolkit/config/dockers.json @@ -0,0 +1,167 @@ +{ + "econib":{ + "name":"econib", + "author":"Michal Marcinkiewicz", + "fileformat":"gz-b17", + "runtime":"runc", + "id":"econib/brats-2018", + "command":" ", + "mountpoint":"/data", + "citation": "https://link.springer.com/chapter/10.1007/978-3-030-11726-9_2" + }, + "mic-dkfz":{ + "name":"mic-dkfz", + "author":"Fabian Isensee", + "fileformat":"gz-b17-f", + "runtime":"nvidia", + "id":"fabianisensee/isen2018", + "command":"python3.6 predict_patient.py", + "mountpoint":"/data", + "citation": "https://link.springer.com/chapter/10.1007/978-3-030-11726-9_21" + }, + "scan":{ + "name":"scan", + "author":"Richard McKinley", + "fileformat":"b17-t1", + "runtime":"nvidia", + "id":"mckinleyscan/brats:v1", + "command":"python DeepSCAN_BRATS.py", + "mountpoint":"/data", + "citation": "https://link.springer.com/chapter/10.1007/978-3-030-11726-9_40" + }, + "xfeng":{ + "name":"xfeng", + "author":"Xue Feng", + "fileformat":"gz-b17-f", + "runtime":"nvidia", + "id":"xf4j/brats18", + "command":"python /brats18/docker_run_survival.py", + "mountpoint":"/data", + "citation": "https://link.springer.com/chapter/10.1007/978-3-030-11726-9_25" + }, + "lfb_rwth":{ + "name":"lfb_rwth", + "author":"Leon Weninger", + "fileformat":"gz-b17-t1", + "runtime":"runc", + "id":"leonweninger/brats18_segmentation", + "command":" ", + "mountpoint":"/data", + "citation": "https://link.springer.com/chapter/10.1007/978-3-030-11726-9_1" + }, + "gbmnet":{ + "name":"gbmnet", + "author":"Nicholas Nuechterlein", + "fileformat":"gz-b17-t1", + "runtime":"runc", + "id":"nknuecht/gbmnet18", + "command":"python evaluate.py", + "mountpoint":"/data", + "citation": "https://link.springer.com/chapter/10.1007/978-3-030-11726-9_22" + }, + "zyx_2019":{ + "name":"zyx_2019", + "author":"yuanxing.zhao@nlpr.ia.ac.cn", + "fileformat":"gz-b18", + "runtime":"nvidia", + "id":"jiaocha/zyxbrats", + "command":" ", + "mountpoint":"/data", + "citation": "https://link.springer.com/chapter/10.1007/978-3-030-32248-9_29" + }, + "scan_2019":{ + "name":"scan_2019", + "author":"Richard McKinley - richard.mckinley@gmail.com", + "fileformat":"gz-b18", + "runtime":"nvidia", + "id":"scan/brats2019", + "command":"python DeepSCAN_BRATS_2019.py", + "mountpoint":"/data", + "citation": "https://link.springer.com/chapter/10.1007/978-3-030-46640-4_36" + }, + "isen-20":{ + "name":"iisen-20", + "author":"Fabian Isensee", + "email": "f.isensee@dkfz-heidelberg.de", + "citation": "TODO", + 
"fileformat":"gz-b18", + "runtime":"nvidia", + "id":"brats/isen-20", + "command":"python runner.py", + "mountpoint":"/app/data/", + "user_mode": true, + "root_issue": false, + "flags": "" + }, + "hnfnetv1-20":{ + "name":"hnfnetv1-20", + "author":"Jia Haozhe", + "email": "HAJ39@pitt.edu", + "citation": "TODO", + "fileformat":"gz-b18", + "runtime":"nvidia", + "id":"brats/hnfnetv1-20", + "command":"", + "mountpoint":"/app/data/", + "user_mode": true, + "root_issue": false, + "flags": "" + }, + "yixinmpl-20":{ + "name":"yixinmpl-20", + "author":"Yixin Wang", + "email": "wangyixin19@mails.ucas.ac.scan2020cn", + "citation": "TODO", + "fileformat":"gz-b18", + "runtime":"nvidia", + "id":"brats/yixinmpl-20", + "command":"python run.py", + "mountpoint":"/workspace/data/", + "user_mode": false, + "root_issue": true, + "result_creation": false, + "flags": "" + }, + "sanet0-20":{ + "name":"sanet0-20", + "author":"Yading Yuan", + "email": "yading.yuan@mountsinai.org", + "citation": "TODO", + "fileformat":"gz-b18", + "runtime":"nvidia", + "id":"brats/sanet0-20", + "command":"", + "mountpoint":"/app/data/", + "user_mode": true, + "root_issue": false, + "flags": "--shm-size=\"128G\"" + }, + "scan_lite-20":{ + "name":"scan_lite-20", + "author":"Richard McKinley", + "email": "richard.mckinley@gmail.com", + "citation": "TODO", + "fileformat":"gz-b18", + "runtime":"nvidia", + "id":"brats/scan-20", + "command":"python3 DeepSCAN_BRATS_2020.py -l", + "mountpoint":"/app/data/", + "user_mode": true, + "root_issue": false, + "flags": "" + }, + "scan-20":{ + "name":"scan2020", + "author":"Richard McKinley", + "email": "richard.mckinley@gmail.com", + "citation": "TODO", + "fileformat":"gz-b18", + "runtime":"nvidia", + "id":"brats/scan-20", + "command":"python3 DeepSCAN_BRATS_2020.py", + "mountpoint":"/app/data/", + "user_mode": true, + "root_issue": false, + "flags": "" + } +} diff --git a/brats_toolkit/config/dockers_demo.json b/brats_toolkit/config/dockers_demo.json new file mode 100644 index 0000000..9455d30 --- /dev/null +++ b/brats_toolkit/config/dockers_demo.json @@ -0,0 +1,20 @@ +{ + "mocker":{ + "name":"mocker", + "author":"Christoph Berger", + "fileformat":"gz-b17", + "runtime":"runc", + "id":"cberger/mockdock", + "command":" ", + "mountpoint":"/data" + }, + "mocker2":{ + "name":"mocker", + "author":"Christoph Berger", + "fileformat":"gz-b17", + "runtime":"runc", + "id":"cberger/mockdock", + "command":" ", + "mountpoint":"/data" + } +} diff --git a/brats_toolkit/config/fileformats.json b/brats_toolkit/config/fileformats.json new file mode 100644 index 0000000..408621f --- /dev/null +++ b/brats_toolkit/config/fileformats.json @@ -0,0 +1,38 @@ +{ + "gz-b17":{ + "t1":"t1.nii.gz", + "t1c":"t1c.nii.gz", + "t2":"t2.nii.gz", + "fla":"flair.nii.gz" + }, + "gz-b17-t1":{ + "t1":"t1.nii.gz", + "t1c":"t1ce.nii.gz", + "t2":"t2.nii.gz", + "fla":"flair.nii.gz" + }, + "gz-b17-f":{ + "t1":"t1.nii.gz", + "t1c":"t1c.nii.gz", + "t2":"t2.nii.gz", + "fla":"fla.nii.gz" + }, + "b17":{ + "t1":"t1.nii", + "t1c":"t1c.nii", + "t2":"t2.nii", + "fla":"flair.nii" + }, + "b17-t1":{ + "t1":"t1.nii", + "t1c":"t1ce.nii", + "t2":"t2.nii", + "fla":"flair.nii" + }, + "gz-b18":{ + "t1":"pat_t1.nii.gz", + "t1c":"pat_t1ce.nii.gz", + "t2":"pat_t2.nii.gz", + "fla":"pat_flair.nii.gz" + } +} diff --git a/brats_toolkit/config/gpu_dockers.json b/brats_toolkit/config/gpu_dockers.json new file mode 100644 index 0000000..15d90e2 --- /dev/null +++ b/brats_toolkit/config/gpu_dockers.json @@ -0,0 +1,47 @@ +{ + "mic-dkfz":{ + "name":"mic-dkfz", + 
"author":"Fabian Isensee", + "fileformat":"gz-b17-f", + "runtime":"nvidia", + "id":"fabianisensee/isen2018", + "command":"python3.6 predict_patient.py", + "mountpoint":"/data" + }, + "scan":{ + "name":"scan", + "author":"Richard McKinley", + "fileformat":"b17-t1", + "runtime":"nvidia", + "id":"mckinleyscan/brats:v2", + "command":"python DeepSCAN_BRATS.py", + "mountpoint":"/data" + }, + "xfeng":{ + "name":"xfeng", + "author":"Xue Feng", + "fileformat":"gz-b17-f", + "runtime":"nvidia", + "id":"xf4j/brats18", + "command":"python /brats18/docker_run_survival.py", + "mountpoint":"/data" + }, + "zyx_2019":{ + "name":"zyx_2019", + "author":"yuanxing.zhao@nlpr.ia.ac.cn", + "fileformat":"gz-b18", + "runtime":"nvidia", + "id":"jiaocha/zyxbrats", + "command":" ", + "mountpoint":"/data" + }, + "scan_2019":{ + "name":"scan_2019", + "author":"Richard McKinley - richard.mckinley@gmail.com", + "fileformat":"gz-b18", + "runtime":"nvidia", + "id":"scan/brats2019", + "command":"python DeepSCAN_BRATS_2019.py", + "mountpoint":"/data" + } +} diff --git a/brats_toolkit/config/survival_dockers.json b/brats_toolkit/config/survival_dockers.json new file mode 100644 index 0000000..318d6ff --- /dev/null +++ b/brats_toolkit/config/survival_dockers.json @@ -0,0 +1,58 @@ +{ + "scan_lite-20":{ + "name":"scan2020_lite", + "author":"Richard McKinley", + "email": "richard.mckinley@gmail.com", + "citation": "TODO", + "fileformat":"gz-b18", + "runtime":"nvidia", + "id":"brats/scan-20", + "command":"python3 DeepSCAN_BRATS_2020.py -l", + "mountpoint":"/app/data/", + "user_mode": "True", + "root_issue": "False", + "flags": "" + }, + "scan-20":{ + "name":"scan2020", + "author":"Richard McKinley", + "email": "richard.mckinley@gmail.com", + "citation": "TODO", + "fileformat":"gz-b18", + "runtime":"nvidia", + "id":"brats/scan-20", + "command":"python3 DeepSCAN_BRATS_2020.py", + "mountpoint":"/app/data/", + "user_mode": "True", + "root_issue": "False", + "flags": "" + }, + "redneucon-20":{ + "name":"redneucon-20", + "author":"Proyecto Fundacion HM", + "email": "psegmentacnn@gmail.com", + "citation": "TODO", + "fileformat":"gz-b18", + "runtime":"nvidia", + "id":"brats/redneucon-20", + "command":"sh /Task2/predict/for_redistribution_files_only/run_predict.sh /opt/mcr/v98/", + "mountpoint":"/app/data/", + "user_mode": "True", + "root_issue": "False", + "flags": "" + }, + "brats_midl-20":{ + "name":"brats_midl-20", + "author":"Muhammad Junaid Ali", + "email": "junaid199f@gmail.com", + "citation": "TODO", + "fileformat":"gz-b18", + "runtime":"nvidia", + "id":"brats/brats_midl-20", + "command":"", + "mountpoint":"/app/data/", + "user_mode": "True", + "root_issue": "False", + "flags": "" + } +} diff --git a/brats_toolkit/fusionator.py b/brats_toolkit/fusionator.py new file mode 100644 index 0000000..829f9f5 --- /dev/null +++ b/brats_toolkit/fusionator.py @@ -0,0 +1,454 @@ +# -*- coding: utf-8 -*- +# Author: Christoph Berger +# Script for the fusion of segmentation labels +# +# Please refer to README.md and LICENSE.md for further documentation +# This software is not certified for clinical use. +import os +import logging +import itertools +import math + +import numpy as np +import os.path as op +from .util import own_itk as oitk +from .util import filemanager as fm + +class Fusionator(object): + def __init__(self, verbose=True): + self.verbose = verbose + + def binaryMav(self, candidates, weights=None): + ''' + binaryMav performs majority vote fusion on an arbitary number of input segmentations with + only two classes each (1 and 0). 
+ + Args: + candidates (list): the candidate segmentations as binary numpy arrays of same shape + weights (list, optional): associated weights for each segmentation in candidates. Defaults to None. + + Return + array: a numpy array with the majority vote result + ''' + num = len(candidates) + if weights == None: + weights = itertools.repeat(1,num) + # manage empty calls + if num == 0: + print('ERROR! No segmentations to fuse.') + elif num == 1: + return candidates[0] + if self.verbose: + print ('Number of segmentations to be fused using compound majority vote is: ', num) + for c in candidates: + print('Candidate with shape {} and values {} and sum {}'.format(c.shape, np.unique(c), np.sum(c))) + # load first segmentation and use it to create initial numpy arrays + temp = candidates[0] + result = np.zeros(temp.shape) + #loop through all available segmentations and tally votes for each class + label = np.zeros(temp.shape) + for c, w in zip(candidates, weights): + if c.max() != 1 or c.min() != 0: + logging.warning('The passed segmentation contains labels other than 1 and 0.') + print('weight is: ' + str(w)) + label[c == 1] += 1.0*w + num = sum(weights) + result[label >= (num/2.0)] = 1 + if self.verbose: + print('Shape of result:', result.shape) + print('Shape of current input array:', temp.shape) + print('Labels and datatype of current output:', result.max(), + result.min(), result.dtype) + return result + + def mav(self, candidates, labels=None, weights=None): + ''' + mav performs majority vote fusion on an arbitary number of input segmentations with + an arbitrary number of labels. + + Args: + candidates (list): the candidate segmentations as binary numpy arrays of same shape + labels (list, optional): a list of labels present in the candidates. Defaults to None. + weights (list, optional): weights for the fusion. Defaults to None. + + Returns: + array: a numpy array with the majority vote result + ''' + num = len(candidates) + if weights == None: + weights = itertools.repeat(1,num) + # manage empty calls + if num == 0: + print('ERROR! No segmentations to fuse.') + if self.verbose: + print ('Number of segmentations to be fused using compound majority vote is: ', num) + # if no labels are passed, get the labels from the first input file (might lead to misisng labels!) + if labels == None: + labels = np.unique(candidates[0]) + for c in candidates: + labels = np.append(labels, np.unique(c)) + print('Labels of current candidate: {}, dtype: {}'.format(np.unique(c), c.dtype)) + labels = np.unique(labels).astype(int) + logging.warning('No labels passed, choosing those labels automatically: {}'.format(labels)) + # remove background label + if 0 in labels: + labels = np.delete(labels, 0) + # load first segmentation and use it to create initial numpy arrays + temp = candidates[0] + result = np.zeros(temp.shape) + #loop through all available segmentations and tally votes for each class + print('Labels: {}'.format(labels)) + for l in sorted(labels, reverse=True): + label = np.zeros(temp.shape) + num = 0 + for c, w in zip(candidates, weights): + print('weight is: ' + str(w)) + label[c == l] += 1.0*w + num = sum(weights) + print(num) + result[label >= (num/2.0)] = l + if self.verbose: + print('Shape of result:', result.shape) + print('Labels and datatype of result:', result.max(), + result.min(), result.dtype) + return result + + def brats_simple(self, candidates, weights=None, t=0.05, stop=25, inc=0.07, method='dice', iterations=25): + ''' + BRATS DOMAIN ADAPTED!!!!! 
SIMPLE implementation using DICE scoring. + Iteratively estimates the accuracy of the segmentations and dynamically assigns weights + for the next iteration. Continues for each label until convergence is reached. + + Args: + candidates (list): the candidate segmentations as numpy arrays of same shape + weights (list, optional): initial weights for each candidate. Defaults to None. + t (float, optional): dropout threshold as a fraction of the maximum score. Defaults to 0.05. + stop (int, optional): convergence tolerance (maximum change in voxel count between iterations). Defaults to 25. + inc (float, optional): increment added to the dropout threshold after each iteration. Defaults to 0.07. + method (str, optional): scoring method passed to _score. Defaults to 'dice'. + iterations (int, optional): maximum number of iterations per label. Defaults to 25. + + Raises: + IOError: If no segmentations to be fused are passed + + Returns: + array: a numpy array with the SIMPLE fusion result + ''' + # manage empty calls + num = len(candidates) + if num == 0: + print('ERROR! No segmentations to fuse.') + raise IOError('No valid segmentations passed for SIMPLE Fusion') + if self.verbose: + print('Number of segmentations to be fused using SIMPLE is: ', num) + # handle unpassed weights + if weights is None: + weights = [1] * num + backup_weights = weights # ugly save to reset weights after each round + # BraTS label groups are hardcoded: whole tumor (2), then tumor core (1), then enhancing tumor (4) + result = np.zeros(candidates[0].shape) + labels = [2,1,4] + logging.info('Fusing a segmentation with the labels: {}'.format(labels)) + # loop over each label + for l in labels: + if self.verbose: + print('Currently fusing label {}'.format(l)) + # load first segmentation and use it to create initial numpy arrays IFF it contains labels + if l == 2: + # whole tumor + bin_candidates = [(c > 0).astype(int) for c in candidates] + elif l == 1: + # tumor core + bin_candidates = [((c == 1) | (c == 4)).astype(int) for c in candidates] + else: + # active tumor + bin_candidates = [(c == 4).astype(int) for c in candidates] + if self.verbose: + print(bin_candidates[0].shape) + # baseline estimate + estimate = self.binaryMav(bin_candidates, weights) + # initial convergence baseline + conv = np.sum(estimate) + # check if the estimate was reasonable + if conv == 0: + logging.error('Majority Voting in SIMPLE returned an empty array') + # return np.zeros(candidates[0].shape) + # reset tau before each iteration + tau = t + for i in range(iterations): + t_weights = [] # temporary weights + for c in bin_candidates: + # score all candidate segmentations + t_weights.append((self._score(c, estimate, method)+1)**2) # SQUARED DICE!
+ weights = t_weights + # save maximum score in weights + max_phi = max(weights) + # remove dropout estimates (and their weights) below the moving threshold + bin_candidates = [c for c, w in zip(bin_candidates, weights) if (w > tau*max_phi)] + weights = [w for w in weights if (w > tau*max_phi)] + # calculate new estimate + estimate = self.binaryMav(bin_candidates, weights) + # increment tau + tau = tau+inc + # check if it converges + if np.abs(conv-np.sum(estimate)) < stop: + if self.verbose: + print('Convergence for label {} after {} iterations reached.'.format(l, i)) + break + conv = np.sum(estimate) + # assign correct label to result + result[estimate == 1] = l + # reset weights + weights = backup_weights + if self.verbose: + print('Shape of result:', result.shape) + print('Shape of current input array:', bin_candidates[0].shape) + print('Labels and datatype of current output:', result.max(), + result.min(), result.dtype) + return result + + def simple(self, candidates, weights=None, t=0.05, stop=25, inc=0.07, method='dice', iterations=25, labels=None): + ''' + SIMPLE implementation using DICE scoring. + Iteratively estimates the accuracy of the segmentations and dynamically assigns weights + for the next iteration. Continues for each label until convergence is reached. + + Args: + candidates (list): the candidate segmentations as numpy arrays of same shape + weights (list, optional): initial weights for each candidate. Defaults to None. + t (float, optional): dropout threshold as a fraction of the maximum score. Defaults to 0.05. + stop (int, optional): convergence tolerance (maximum change in voxel count between iterations). Defaults to 25. + inc (float, optional): increment added to the dropout threshold after each iteration. Defaults to 0.07. + method (str, optional): scoring method passed to _score. Defaults to 'dice'. + iterations (int, optional): maximum number of iterations per label. Defaults to 25. + labels (list, optional): a list of labels present in the candidates. Defaults to None. + + Raises: + IOError: If no segmentations to be fused are passed + + Returns: + array: a numpy array with the SIMPLE fusion result + ''' + # manage empty calls + num = len(candidates) + if num == 0: + print('ERROR! No segmentations to fuse.') + raise IOError('No valid segmentations passed for SIMPLE Fusion') + if self.verbose: + print('Number of segmentations to be fused using SIMPLE is: ', num) + # handle unpassed weights + if weights is None: + weights = [1] * num + backup_weights = weights # ugly save to reset weights after each round + # get unique labels for multi-class fusion + if labels is None: + labels = np.unique(candidates[0]) + for c in candidates: + labels = np.append(labels, np.unique(c)) + print('Labels of current candidate: {}, dtype: {}'.format(np.unique(c), c.dtype)) + labels = np.unique(labels).astype(int) + logging.warning('No labels passed, choosing those labels automatically: {}'.format(labels)) + result = np.zeros(candidates[0].shape) + # remove background label + if 0 in labels: + labels = np.delete(labels, 0) + logging.info('Fusing a segmentation with the labels: {}'.format(labels)) + # loop over each label + for l in sorted(labels): + if self.verbose: + print('Currently fusing label {}'.format(l)) + # load first segmentation and use it to create initial numpy arrays IFF it contains labels + bin_candidates = [(c == l).astype(int) for c in candidates] + if self.verbose: + print(bin_candidates[0].shape) + # baseline estimate + estimate = self.binaryMav(bin_candidates, weights) + # initial convergence baseline + conv = np.sum(estimate) + # check if the estimate was reasonable + if conv == 0: + logging.error('Majority Voting in SIMPLE returned an empty array') + # return np.zeros(candidates[0].shape) + # reset tau before each iteration + tau = t + for i in range(iterations): + t_weights = [] # temporary weights + for c in bin_candidates: + # score all candidate segmentations + t_weights.append((self._score(c, estimate, method)+1)**2) # SQUARED DICE! + weights = t_weights + # save maximum score in weights + max_phi = max(weights) + # remove dropout estimates (and their weights) below the moving threshold + bin_candidates = [c for c, w in zip(bin_candidates, weights) if (w > tau*max_phi)] + weights = [w for w in weights if (w > tau*max_phi)] + # calculate new estimate + estimate = self.binaryMav(bin_candidates, weights) + # increment tau + tau = tau+inc + # check if it converges + if np.abs(conv-np.sum(estimate)) < stop: + if self.verbose: + print('Convergence for label {} after {} iterations reached.'.format(l, i)) + break + conv = np.sum(estimate) + # assign correct label to result + result[estimate == 1] = l + # reset weights + weights = backup_weights + if self.verbose: + print('Shape of result:', result.shape) + print('Shape of current input array:', bin_candidates[0].shape) + print('Labels and datatype of current output:', result.max(), + result.min(), result.dtype) + return result + + def dirFuse(self, directory, method='mav', outputPath=None, labels=None): + ''' + dirFuse fuses all .nii.gz segmentations found in a given directory. + + Args: + directory (str): the directory containing the candidate segmentations + method (str, optional): fusion method ('mav', 'simple' or 'brats-simple'). Defaults to 'mav'. + outputPath (str, optional): path for the fused segmentation; defaults to a file inside the input directory. + ''' + if method == 'all': + return + candidates = [] + weights = [] + temp = None + for file in os.listdir(directory): + if file.endswith('.nii.gz'): + # skip existing fusions + if 'fusion' in file: + continue + temp = op.join(directory, file) + try: + candidates.append(oitk.get_itk_array(oitk.get_itk_image(temp))) + weights.append(1) + print('Loaded: ' + os.path.join(directory, file)) + except Exception as e: + print('Could not load this file: ' + file + ' \nPlease check if this is a valid path and that the file exists. 
Exception: ' + str(e)) + if method == 'mav': + print('Orchestra: Now fusing all .nii.gz files in directory {} using MAJORITY VOTING. For more output, set the -v or --verbose flag or instantiate the fusionator class with verbose=true'.format(directory)) + result = self.mav(candidates, labels, weights) + elif method == 'simple': + print('Orchestra: Now fusing all .nii.gz files in directory {} using SIMPLE. For more output, set the -v or --verbose flag or instantiate the fusionator class with verbose=true'.format(directory)) + result = self.simple(candidates, weights) + elif method == 'brats-simple': + print('Orchestra: Now fusing all .nii.gz files in directory {} using BRATS-SIMPLE. For more output, set the -v or --verbose flag or instantiate the fusionator class with verbose=true'.format(directory)) + result = self.brats_simple(candidates, weights) + try: + if outputPath is None: + oitk.write_itk_image(oitk.make_itk_image(result, proto_image=oitk.get_itk_image(temp)), op.join(directory, method + '_fusion.nii.gz')) + else: + outputDir = op.dirname(outputPath) + os.makedirs(outputDir, exist_ok=True) + oitk.write_itk_image(oitk.make_itk_image(result, proto_image=oitk.get_itk_image(temp)), outputPath) + logging.info('Segmentation Fusion with method {} saved in directory {}.'.format(method, directory)) + except Exception as e: + print('Error while saving the fused segmentation: ' + str(e)) + logging.exception('Issues while saving the resulting segmentation: {}'.format(str(e))) + + def fuse(self, segmentations, outputPath, method='mav', weights=None, labels=None): + ''' + fuse fuses a list of segmentation files into a single segmentation. + + Args: + segmentations (list): paths of the .nii.gz segmentations to be fused + outputPath (str): path for the output in format filename.nii.gz + method (str, optional): fusion method ('mav', 'simple' or 'brats-simple'). Defaults to 'mav'. + weights (list, optional): weights for the candidate segmentations. Defaults to None. + + Raises: + IOError: if the number of weights does not match the number of segmentations + ''' + candidates = [] + if weights is not None: + if len(weights) != len(segmentations): + raise IOError('Please pass a matching number of weights and segmentation files') + w_weights = weights + else: + w_weights = [] + for seg in segmentations: + if seg.endswith('.nii.gz'): + try: + candidates.append(oitk.get_itk_array(oitk.get_itk_image(seg))) + if weights is None: + w_weights.append(1) + print('Loaded: ' + seg) + except Exception as e: + print('Could not load this file: ' + seg + ' \nPlease check if this is a valid path and that the file exists. Exception: ' + str(e)) + raise + if method == 'mav': + print('Orchestra: Now fusing all passed .nii.gz files using MAJORITY VOTING. For more output, set the -v or --verbose flag or instantiate the fusionator class with verbose=true') + result = self.mav(candidates, labels=labels, weights=w_weights) + elif method == 'simple': + print('Orchestra: Now fusing all passed .nii.gz files using SIMPLE. For more output, set the -v or --verbose flag or instantiate the fusionator class with verbose=true') + result = self.simple(candidates, w_weights) + elif method == 'brats-simple': + print('Orchestra: Now fusing all passed .nii.gz files using BRATS-SIMPLE. 
For more output, set the -v or --verbose flag or instantiate the fusionator class with verbose=true') + result = self.brats_simple(candidates, w_weights) + try: + outputDir = op.dirname(outputPath) + os.makedirs(outputDir, exist_ok=True) + oitk.write_itk_image(oitk.make_itk_image(result, proto_image=oitk.get_itk_image(seg)), outputPath) + logging.info('Segmentation Fusion with method {} saved as {}.'.format(method, outputPath)) + except Exception as e: + print('Error while saving the fused segmentation: ' + str(e)) + logging.exception('Issues while saving the resulting segmentation: {}'.format(str(e))) + + def _score(self, seg, gt, method='dice'): + ''' Calculates a similarity score based on the + method specified in the parameters + Input: Numpy arrays to be compared, need to have the + same dimensions (shape) + Default scoring method: DICE coefficient + method may be: 'dice' + 'auc' + 'bdice' + returns: a score [0,1], 1 for identical inputs + ''' + try: + # True Positive (TP): we predict a label of 1 (positive) and the true label is 1. + TP = np.sum(np.logical_and(seg == 1, gt == 1)) + # True Negative (TN): we predict a label of 0 (negative) and the true label is 0. + TN = np.sum(np.logical_and(seg == 0, gt == 0)) + # False Positive (FP): we predict a label of 1 (positive), but the true label is 0. + FP = np.sum(np.logical_and(seg == 1, gt == 0)) + # False Negative (FN): we predict a label of 0 (negative), but the true label is 1. + FN = np.sum(np.logical_and(seg == 0, gt == 1)) + FPR = FP/(FP+TN) + FNR = FN/(FN+TP) + TPR = TP/(TP+FN) + TNR = TN/(TN+FP) + except ValueError: + print('Value error encountered!') + return 0 + # faster dice? Oh yeah! + if method == 'dice': + # default dice score + score = 2*TP/(2*TP+FP+FN) + elif method == 'auc': + # AUC scoring + score = 1 - (FPR+FNR)/2 + elif method == 'bdice': + # biased dice towards false negatives + score = 2*TP/(2*TP+FN) + elif method == 'spec': + # specificity + score = TN/(TN+FP) + elif method == 'sens': + # sensitivity + score = TP/(TP+FN) + elif method == 'toterr': + # total error rate, normalised by the BraTS volume size (240 x 240 x 155 voxels) + score = (FN+FP)/(155*240*240) + elif method == 'ppv': + # prevalence-weighted positive predictive value + prev = np.sum(gt)/(155*240*240) + temp = TPR*prev + score = (temp)/(temp + (1-TNR)*(1-prev)) + else: + score = 0 + if np.isnan(score) or math.isnan(score): + score = 0 + return score diff --git a/brats_toolkit/preprocessor.py b/brats_toolkit/preprocessor.py new file mode 100644 index 0000000..c2dc830 --- /dev/null +++ b/brats_toolkit/preprocessor.py @@ -0,0 +1,140 @@ +import socketio +from brats_toolkit.util.docker_functions import start_docker, stop_docker, update_docker +import os +import tempfile +from pathlib import Path + +from brats_toolkit.util.prep_utils import tempFiler +import sys + + +class Preprocessor(object): + def __init__(self, noDocker=False): + # settings + self.clientVersion = "0.0.1" + self.confirmationRequired = True + self.mode = "cpu" + self.gpuid = "0" + + # init sio client + self.sio = socketio.Client() + + # set docker usage + self.noDocker = noDocker + + @self.sio.event + def connect(): + print("connection established! 
sid:", self.sio.sid) + # client identification + self.sio.emit("clientidentification", { + "brats_cli": self.clientVersion, "proc_mode": self.mode}) + + @self.sio.event + def connect_error(): + print("The connection failed!") + + @self.sio.event + def disconnect(): + print('disconnected from server') + + @self.sio.on('message') + def message(data): + print('message', data) + + @self.sio.on('status') + def on_status(data): + print('status received: ', data) + if data['message'] == "client ID json generation finished!": + self.inspect_input() + elif data['message'] == "input inspection finished!": + if "data" in data: + print("input inspection found the following exams: ", + data['data']) + if self.confirmationRequired: + confirmation = input( + "press \"y\" to continue or \"n\" to scan the input folder again.").lower() + else: + confirmation = "y" + + if confirmation == "n": + self.inspect_input() + + if confirmation == "y": + self.process_start() + + elif data['message'] == "image processing successfully completed.": + self.sio.disconnect() + stop_docker() + sys.exit(0) + + @self.sio.on('client_outdated') + def outdated(data): + print("Your client version", self.clientVersion, "is outdated. Please download version", data, + "from:") + print("https://neuronflow.github.io/brats-preprocessor/") + self.sio.disconnect() + stop_docker() + sys.exit(0) + + @self.sio.on('ipstatus') + def on_ipstatus(data): + print("image processing status received:") + print(data['examid'], ": ", data['ipstatus']) + + def single_preprocess(self, t1File, t1cFile, t2File, flaFile, outputFolder, mode, confirm=False, skipUpdate=False, gpuid='0'): + # assign name to file + print("basename:", os.path.basename(outputFolder)) + outputPath = Path(outputFolder) + dockerOutputFolder = os.path.abspath(outputPath.parent) + + # create temp dir + storage = tempfile.TemporaryDirectory() + # TODO this is a potential security hazard as all users can access the files now, but currently it seems the only way to deal with badly configured Docker installations + os.chmod(storage.name, 0o777) + dockerFolder = os.path.abspath(storage.name) + tempFolder = os.path.join(dockerFolder, os.path.basename(outputFolder)) + + os.makedirs(tempFolder, exist_ok=True) + print("tempFolder:", tempFolder) + + # create temp Files + tempFiler(t1File, "t1", tempFolder) + tempFiler(t1cFile, "t1c", tempFolder) + tempFiler(t2File, "t2", tempFolder) + tempFiler(flaFile, "fla", tempFolder) + + self.batch_preprocess(exam_import_folder=dockerFolder, exam_export_folder=dockerOutputFolder, mode=mode, + confirm=confirm, skipUpdate=skipUpdate, gpuid=gpuid) + + def batch_preprocess(self, exam_import_folder=None, exam_export_folder=None, dicom_import_folder=None, + nifti_export_folder=None, + mode="cpu", confirm=True, skipUpdate=False, gpuid='0'): + if not confirm: + self.confirmationRequired = False + self.mode = mode + self.gpuid = gpuid + + if not self.noDocker: + stop_docker() + if not skipUpdate: + update_docker() + start_docker(exam_import_folder=exam_import_folder, exam_export_folder=exam_export_folder, + dicom_import_folder=dicom_import_folder, nifti_export_folder=nifti_export_folder, mode=self.mode, gpuid=self.gpuid) + + # setup connection + # TODO do this in a more elegant way and somehow check whether docker is up and running before connect + self.sio.sleep(5) # wait 5 secs for docker to start + self.connect_client() + self.sio.wait() + + def connect_client(self): + self.sio.connect('http://localhost:5000') + print('sid:', self.sio.sid) + + def 
inspect_input(self): + print("sending input inspection request!") + self.sio.emit("input_inspection", {'hurray': 'yes'}) + + def process_start(self): + print("sending processing request!") + self.sio.emit("brats_processing", {'hurray': 'yes'}) diff --git a/brats_toolkit/segmentor.py b/brats_toolkit/segmentor.py new file mode 100644 index 0000000..991220a --- /dev/null +++ b/brats_toolkit/segmentor.py @@ -0,0 +1,371 @@ +# -*- coding: utf-8 -*- +# Author: Christoph Berger +# Script for evaluation and bulk segmentation of Brain Tumor Scans +# using the MICCAI BRATS algorithmic repository +# +# Please refer to README.md and LICENSE.md for further documentation +# This software is not certified for clinical use. + +__version__ = '0.1' +__author__ = 'Christoph Berger' + +import errno +import glob +import json +import logging +import os +import os.path as op +import subprocess +import sys +import tempfile + +import numpy as np + +from . import fusionator +from .util import filemanager as fm +from .util import own_itk as oitk + + +class Segmentor(object): + ''' + Now does it all! + ''' + + def __init__(self, config=None, fileformats=None, verbose=True, tty=False, newdocker=True, gpu='0'): + ''' Init the orchestra class with placeholders + ''' + self.noOfContainers = 0 + self.config = [] + self.directory = None + self.verbose = verbose + self.tty = tty + self.dockerGPU = newdocker + self.gpu = gpu + self.package_directory = op.dirname(op.abspath(__file__)) + # set environment variables to limit GPU usage + os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID' # see issue #152 + os.environ['CUDA_VISIBLE_DEVICES'] = gpu + if config is None: + config = op.join(self.package_directory, 'config', 'dockers.json') + if fileformats is None: + self.fileformats = op.join( + self.package_directory, 'config', 'fileformats.json') + else: + self.fileformats = fileformats + try: + configfile = open(config, 'r') + self.config = json.load(configfile) + self.noOfContainers = len(self.config.keys()) + configfile.close() + except IOError as e: + logging.exception( + 'I/O error({0}): {1}'.format(e.errno, e.strerror)) + raise + except ValueError: + logging.exception('Invalid configuration file') + raise + except: + logging.exception('Unexpected Error!') + raise + + def getFileFormat(self, index): + return self.config[index]['fileformat'] + + def getContainerName(self, index): + return self.config[index]['name'] + + def getNumberOfContainers(self): + return len(self.config) + + def runDummyContainer(self, stop=False): + command = 'docker run --rm -it hello-world' + subprocess.check_call(command, shell=True) + + def runContainer(self, id, directory, outputDir, outputName): + ''' + Runs one container on one patient folder + ''' + logging.info( + 'Now running a segmentation with the Docker {}.'.format(id)) + logging.info('Output will be in {}.'.format(outputDir)) + + params = self.config[id] # only references, doesn't copy + command = 'docker run --rm' + # assemble the rest of the command + flags = params.get('flags', '') + # check if we need to map the user + if params.get('user_mode', False): + user_flags = '--user $(id -u):$(id -g)' + else: + user_flags = '' + # assemble the gpu flags if needed + if params['runtime'] == 'nvidia': + if self.dockerGPU: + # TODO clean this up + gpu_flags = '--gpus device=' + str(self.gpu) + else: + gpu_flags = '--runtime=nvidia -e CUDA_VISIBLE_DEVICES=' + \ + str(self.gpu) + else: + gpu_flags = '' + # assemble directory mapping + volume = '-v ' + str(directory) + ':' + str(params['mountpoint']) + # 
assemble execution command + call = str(params['command']) + + # stick everything together + command = command + ' ' + user_flags + ' ' + gpu_flags + ' ' + \ + flags + ' ' + volume + ' ' + params['id'] + ' ' + call + + if self.verbose: + print('Executing: {}'.format(command)) + try: + with open(op.join(outputDir, '{}_output.log'.format(outputName.split('.')[0])), 'w') as f: + subprocess.check_call(command, shell=True, stdout=f) + except Exception as e: + logging.error( + 'Segmentation failed for case {} with error: {}'.format(directory, e)) + if 'exit status 125' in str(e): + logging.error( + 'DOCKER DAEMON not running! Please start your Docker runtime.') + sys.exit(125) + return False + if self.verbose: + logging.info('Container exited without error') + return True + + def runIterate(self, dir, cid): + ''' Iterates over a directory and runs the segmentation on each patient found + ''' + logging.info('Looking for BRATS data directories..') + for fn in os.listdir(dir): + if not os.path.isdir(os.path.join(dir, fn)): + continue # Not a directory + if 'DE_RI' in fn: + logging.info('Found pat data: {}'.format(fn)) + try: + os.makedirs(os.path.join(os.path.join(dir, fn), + 'results')) + except OSError as err: + if err.errno != errno.EEXIST: + raise + logging.info('Calling Container: {}'.format(cid)) + if not self.runContainer(cid, os.path.join(dir, fn), dir, fn): + logging.info( + 'ERROR: Run failed for patient {} with container {}'.format(fn, cid)) + return False + # TODO: rename folder and prepend pat_id + # rename_folder(img_id, os.path.join(directory, fn), fn) + return True + + def multiSegment(self, tempDir, inputs, method, outputName, outputDir): + ''' + multiSegment runs all configured containers on one set of inputs and fuses the results. + + Args: + tempDir (str): temporary directory used to stage the input images + inputs (dict): mapping of the modality keys (t1, t1c, t2, fla) to image paths + method (str): fusion method passed on to dirFuse (e.g. 'mav' or 'simple') + outputName (str): filename for the fused segmentation + outputDir (str): directory for all segmentation results + ''' + logging.debug('CALLED MULTISEGMENT') + fusion = fusionator.Fusionator() + for cid in self.config.keys(): + # replace this with a call to single-segment + logging.info('[Orchestra] Segmenting with ' + cid) + ff = self._format(self.getFileFormat(cid), self.fileformats) + for key, img in inputs.items(): + savepath = op.join(tempDir, ff[key]) + img = oitk.get_itk_image(img) + if self.verbose: + logging.info( + '[Weborchestra][Info] Writing to path {}'.format(savepath)) + oitk.write_itk_image(img, savepath) + if self.verbose: + logging.info('[Weborchestra][Info] Images saved correctly') + logging.info( + '[Weborchestra][Info] Starting the Segmentation with container {} now'.format(cid)) + + status = self.runContainer(cid, tempDir, outputDir, outputName) + if status: + if self.verbose: + logging.info('[Weborchestra][Success] Segmentation saved') + resultsDir = op.join(tempDir, 'results/') + saveLocation = op.join(outputDir, cid + '_tumor_seg.nii.gz') + self._handleResult(cid, resultsDir, outputPath=saveLocation) + else: + logging.exception( + 'Container run for CID {} failed!'.format(cid)) + fusion.dirFuse(outputDir, method=method, + outputPath=op.join(outputDir, outputName)) + + def singleSegment(self, tempDir, inputs, cid, outputName, outputDir): + ''' + singleSegment runs a single container on one set of inputs. + + Args: + tempDir (str): temporary directory used to stage the input images + inputs (dict): mapping of the modality keys (t1, t1c, t2, fla) to image paths + cid (str): id of the container to run + outputName (str): filename for the segmentation result + outputDir (str): directory for the segmentation result + ''' + ff = self._format(self.getFileFormat(cid), self.fileformats) + for key, img in inputs.items(): + savepath 
= op.join(tempDir, ff[key]) + img = oitk.get_itk_image(img) + if self.verbose: + logging.info( + '[Weborchestra][Info] Writing to path {}'.format(savepath)) + oitk.write_itk_image(img, savepath) + if self.verbose: + logging.info('[Weborchestra][Info] Images saved correctly') + logging.info( + '[Weborchestra][Info] Starting the Segmentation with {} now'.format(cid)) + status = self.runContainer(cid, tempDir, outputDir, outputName) + if status: + if self.verbose: + logging.info('[Weborchestra][Success] Segmentation saved') + resultsDir = op.join(tempDir, 'results/') + self._handleResult( + cid, resultsDir, outputPath=op.join(outputDir, outputName)) + # delete tmp directory if result was saved elsewhere + else: + logging.error( + '[Weborchestra][Error] Segmentation failed, see output!') + + def segment(self, t1=None, t1c=None, t2=None, fla=None, cid='mocker', outputPath=None): + ''' + segment runs a single container (or a fusion of all containers) on the four input modalities. + + Args: + t1 (str, optional): path to the t1 modality. Defaults to None. + t1c (str, optional): path to the t1c modality. Defaults to None. + t2 (str, optional): path to the t2 modality. Defaults to None. + fla (str, optional): path to the fla modality. Defaults to None. + cid (str, optional): container id or fusion method (mav, simple, all). Defaults to 'mocker'. + outputPath (str, optional): path for the resulting segmentation. Defaults to None. + ''' + # Call output method here + outputName, outputDir = self._whereDoesTheFileGo(outputPath, t1, cid) + # set up logging (for all internal functions) + logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', filename=op.join( + outputDir, 'segmentor_high_level.log'), level=logging.DEBUG) + logging.getLogger().addHandler(logging.StreamHandler()) + logging.debug('DIRNAME is: ' + outputDir) + logging.debug('FILENAME is: ' + outputName) + logging.info( + 'Now running a new set of segmentations on input: {}'.format(op.dirname(t1))) + # gather the input modalities + inputs = {'t1': t1, 't2': t2, 't1c': t1c, 'fla': fla} + # create temporary directory for storage + storage = tempfile.TemporaryDirectory(dir=self.package_directory) + # TODO this is a potential security hazard as all users can access the files now, but currently it seems the only way to deal with badly configured Docker installations + os.chmod(storage.name, 0o777) + tempDir = op.abspath(storage.name) + resultsDir = op.join(tempDir, 'results') + os.mkdir(resultsDir) + # TODO this is a potential security hazard as all users can access the files now, but currently it seems the only way to deal with badly configured Docker installations + os.chmod(resultsDir, 0o777) + logging.debug(tempDir) + logging.debug(resultsDir) + + if cid == 'mav' or cid == 'simple' or cid == 'all': + # segment with all containers and fuse the results + logging.info('Called multiSegment with method: ' + cid) + self.multiSegment(tempDir, inputs, cid, outputName, outputDir) + else: + # segment only with a single container + logging.info('Called singleSegment with docker: ' + cid) + self.singleSegment(tempDir, inputs, cid, outputName, outputDir) + + ### Private utility methods below ### + + def _whereDoesTheFileGo(self, outputPath, t1path, cid): + if outputPath is None: + outputDir = op.join(op.dirname(t1path), 'output') + outputName = cid + '_segmentation.nii.gz' + elif outputPath.endswith('.nii.gz'): + if '~' in outputPath: + outputPath = op.expanduser(outputPath) + # valid filename + outputDir = op.dirname(outputPath) + outputName = op.basename(outputPath) + # if only a filename is passed, use the t1 directory + if outputDir == '': + outputDir = op.join(op.dirname(t1path), 'output') + else: + outputDir = outputName = None + 
+ if outputDir is None or outputName is None: + raise ValueError('The outputPath is ambiguous and cannot be determined! path: {}, t1path: {}, cid: {}'.format( outputPath, t1path, cid)) + # build abspaths: + outputDir = op.abspath(outputDir) + try: + os.makedirs(outputDir, exist_ok=True) + except Exception as e: + print('could not create target directory: {}'.format(outputDir)) + raise e + return outputName, outputDir + + def _handleResult(self, cid, directory, outputPath): + ''' + This function handles the copying and renaming of the + Segmentation result before returning + ''' + # TODO: find the segmentation result + contents = glob.glob( + op.join(directory, 'tumor_' + cid + '_class.nii*')) + if len(contents) == 0: + contents = glob.glob(op.join(directory, 'tumor_*_class.nii*')) + if len(contents) == 0: + contents = glob.glob(op.join(directory, cid + '*.nii*')) + if len(contents) == 0: + contents = glob.glob(op.join(directory, '*tumor*.nii*')) + if len(contents) < 1: + logging.error( + '[Weborchestra - Filehandling][Error] No segmentation saved, the container run has most likely failed.') + return + elif len(contents) > 1: + logging.warning( + '[Weborchestra - Filehandling][Warning] Multiple Segmentations found') + print('found files: {}'.format(contents)) + img = oitk.get_itk_image(contents[0]) + labels = 0 + exportImg = None + for _, c in enumerate(contents): + img = oitk.get_itk_image(c) + if labels < len(np.unique(oitk.get_itk_array(img))): + exportImg = img + labels = len(np.unique(oitk.get_itk_array(img))) + oitk.write_itk_image(exportImg, op.join(outputPath)) + logging.warning( + '[Weborchestra - Filehandling][Warning] Segmentation with most labels ({}) for cid {} saved'.format(labels, cid)) + return + img = oitk.get_itk_image(contents[0]) + for c in contents: + os.remove(op.join(directory, c)) + oitk.write_itk_image(img, outputPath) + + def _format(self, fileformat, configpath, verbose=True): + # load fileformat for a given container + try: + configfile = open(op.abspath(configpath), 'r') + config = json.load(configfile) + configfile.close() + except IOError as e: + logging.exception( + 'I/O error({0}): {1}'.format(e.errno, e.strerror)) + raise + except ValueError: + logging.exception('Invalid configuration file') + raise + except: + logging.exception('Unexpected Error!') + raise + logging.info('[Weborchestra][Success] Loaded fileformat: {}'.format( + config[fileformat])) + return config[fileformat] diff --git a/brats_toolkit/util/__init__.py b/brats_toolkit/util/__init__.py new file mode 100755 index 0000000..8b13789 --- /dev/null +++ b/brats_toolkit/util/__init__.py @@ -0,0 +1 @@ + diff --git a/brats_toolkit/util/backend_scripts/unix_docker.sh b/brats_toolkit/util/backend_scripts/unix_docker.sh new file mode 100755 index 0000000..793ee81 --- /dev/null +++ b/brats_toolkit/util/backend_scripts/unix_docker.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash +echo "executing unix_docker.sh" +echo $1 +echo $2 +echo $3 +echo $4 +echo $5 +# check if parameter is present +if [ $# -eq 0 ]; then + echo "You must enter the number of desired workers and 5 paths, e.g. 
diff --git a/brats_toolkit/util/__init__.py b/brats_toolkit/util/__init__.py
new file mode 100755
index 0000000..8b13789
--- /dev/null
+++ b/brats_toolkit/util/__init__.py
@@ -0,0 +1 @@
+
diff --git a/brats_toolkit/util/backend_scripts/unix_docker.sh b/brats_toolkit/util/backend_scripts/unix_docker.sh
new file mode 100755
index 0000000..793ee81
--- /dev/null
+++ b/brats_toolkit/util/backend_scripts/unix_docker.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+echo "executing unix_docker.sh"
+echo $1
+echo $2
+echo $3
+echo $4
+echo $5
+# check if parameters are present
+if [ $# -eq 0 ]; then
+  echo "You must pass the number of desired workers and 5 paths, e.g. docker_run.sh 2 /setting /dicom_import /nifti_export /exam_import /exam_export"
+  exit 1
+fi
+
+docker stop greedy_elephant
+docker run --rm -d --name=greedy_elephant -p 5000:5000 -p 9181:9181 -v "$2":"/data/import/dicom_import" -v "$3":"/data/export/nifti_export" -v "$4":"/data/import/exam_import" -v "$5":"/data/export/exam_export" projectelephant/server redis-server
+# wait until everything is started up
+sleep 5
+# start an X server so GUI-dependent tools can run headlessly
+docker exec -d greedy_elephant /bin/bash -c "source ~/.bashrc; Xorg -noreset +extension GLX +extension RANDR +extension RENDER -logfile ./etc/10.log -config ./etc/X11/xorg.conf :0;"
+docker exec -d greedy_elephant python3 elephant_server.py
+docker exec -d greedy_elephant /bin/bash -c "source ~/.bashrc; rq-dashboard;"
+# workaround: source ~/.bashrc on every exec to set the correct PATH (the container does not pick it up otherwise)
+docker exec -d greedy_elephant /bin/bash -c "source ~/.bashrc; ./start_workers.sh;"
+
+# TODO fix user thing, also need to add user on exec
+# userid=$(id -u)
+# usergroup=$(id -g)
+# echo $userid:$usergroup
+# --user $userid:$usergroup
diff --git a/brats_toolkit/util/backend_scripts/unix_docker_gpu.sh b/brats_toolkit/util/backend_scripts/unix_docker_gpu.sh
new file mode 100755
index 0000000..66dac66
--- /dev/null
+++ b/brats_toolkit/util/backend_scripts/unix_docker_gpu.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+# check if parameters are present
+if [ $# -eq 0 ]; then
+  echo "You must pass the number of desired workers and 5 paths, e.g. docker_run.sh 2 /setting /dicom_import /nifti_export /exam_import /exam_export"
+  exit 1
+fi
+
+docker stop greedy_elephant
+# TODO set gpu
+docker run --rm -d --name=greedy_elephant --gpus device=$6 -p 5000:5000 -p 9181:9181 -v "$2":"/data/import/dicom_import" -v "$3":"/data/export/nifti_export" -v "$4":"/data/import/exam_import" -v "$5":"/data/export/exam_export" projectelephant/server redis-server
+# wait until everything is started up
+sleep 10
+# start an X server so GUI-dependent tools can run headlessly
+docker exec -d greedy_elephant /bin/bash -c "source ~/.bashrc; Xorg -noreset +extension GLX +extension RANDR +extension RENDER -logfile ./etc/10.log -config ./etc/X11/xorg.conf :0;"
+docker exec -d greedy_elephant python3 elephant_server.py
+docker exec -d greedy_elephant /bin/bash -c "source ~/.bashrc; rq-dashboard;"
+# workaround: source ~/.bashrc on every exec to set the correct PATH (the container does not pick it up otherwise)
+docker exec -d greedy_elephant /bin/bash -c "source ~/.bashrc; ./start_workers.sh;"
+
+# TODO fix user thing, also need to add user on exec
+# userid=$(id -u)
+# usergroup=$(id -g)
+# echo $userid:$usergroup
+# --user $userid:$usergroup
diff --git a/brats_toolkit/util/backend_scripts/win_docker.cmd b/brats_toolkit/util/backend_scripts/win_docker.cmd
new file mode 100755
index 0000000..6c03097
--- /dev/null
+++ b/brats_toolkit/util/backend_scripts/win_docker.cmd
@@ -0,0 +1,11 @@
+SETLOCAL ENABLEEXTENSIONS
+SET me=%~n0
+SET parent=%~dp0
+REM get container ID and stop it (rm is automatic)
+docker stop greedy_elephant
+docker run --rm -d --name=greedy_elephant -p 5000:5000 -p 9181:9181 -v %2:"/data/import/dicom_import" -v %3:"/data/export/nifti_export" -v %4:"/data/import/exam_import" -v %5:"/data/export/exam_export" projectelephant/server redis-server
+docker exec -d greedy_elephant /bin/bash -c "source ~/.bashrc; Xorg -noreset +extension GLX +extension RANDR +extension RENDER -logfile ./etc/10.log -config ./etc/X11/xorg.conf :0;"
+docker exec -d greedy_elephant python3 elephant_server.py
+docker exec -d greedy_elephant /bin/bash -c "source ~/.bashrc; rq-dashboard;"
+docker exec -d greedy_elephant /bin/bash -c "source ~/.bashrc; ./start_workers.sh;"
+del temp.txt
diff --git a/brats_toolkit/util/docker_functions.py b/brats_toolkit/util/docker_functions.py
new file mode 100644
index 0000000..d6d31fe
--- /dev/null
+++ b/brats_toolkit/util/docker_functions.py
@@ -0,0 +1,93 @@
+import shlex
+import platform
+import pathlib
+import subprocess
+import os
+
+
+def start_docker(exam_import_folder=None,
+                 exam_export_folder=None, dicom_import_folder=None, nifti_export_folder=None, mode="cpu", gpuid='0'):
+    # deal with missing arguments
+    if dicom_import_folder is None:
+        dicom_import_folder = exam_import_folder
+    if nifti_export_folder is None:
+        nifti_export_folder = exam_export_folder
+    if exam_import_folder is None:
+        exam_import_folder = dicom_import_folder
+    if exam_export_folder is None:
+        exam_export_folder = nifti_export_folder
+
+    # convert to absolute paths
+    exam_import_folder = os.path.abspath(exam_import_folder)
+    exam_export_folder = os.path.abspath(exam_export_folder)
+    dicom_import_folder = os.path.abspath(dicom_import_folder)
+    nifti_export_folder = os.path.abspath(nifti_export_folder)
+    print("exam_import_folder:", exam_import_folder)
+    print("dicom_import_folder:", dicom_import_folder)
+
+    print("exam_export_folder:", exam_export_folder)
+    print("nifti_export_folder:", nifti_export_folder)
+
+    # make sure directories exist
+    os.makedirs(nifti_export_folder, exist_ok=True)
+    os.makedirs(exam_export_folder, exist_ok=True)
+
+    # pick the right backend script
+    operatingSystem = platform.system()
+    if operatingSystem == "Windows":
+        bashscript = os.path.normpath(
+            './backend_scripts/win_docker.cmd')
+    elif mode in ("cpu", "robex"):
+        bashscript = os.path.normpath(
+            './backend_scripts/unix_docker.sh')
+    elif mode in ("gpu", "gpu_hdbet"):
+        bashscript = os.path.normpath(
+            './backend_scripts/unix_docker_gpu.sh')
+    else:
+        # previously bashscript stayed unbound for unknown modes
+        raise ValueError('unknown mode: {}'.format(mode))
+
+    # generate subprocess call
+    command = [bashscript, "3", dicom_import_folder,
+               nifti_export_folder, exam_import_folder, exam_export_folder, gpuid]
+    print(*command)
+
+    cwd = pathlib.Path(__file__).resolve().parent
+    print(cwd)
+
+    print("starting docker!")
+    subprocess.run(command, cwd=cwd)
+    print("docker started!")
+
+
+def stop_docker():
+    # stop the container
+    readableCmd = "docker stop greedy_elephant"
+    command = shlex.split(readableCmd)
+
+    cwd = pathlib.Path(__file__).resolve().parent
+
+    print("stopping docker with command:", readableCmd)
+    subprocess.run(command, cwd=cwd)
+    # remove the container
+    readableCmd = "docker rm greedy_elephant"
+    command = shlex.split(readableCmd)
+
+    cwd = pathlib.Path(__file__).resolve().parent
+
+    print("removing docker container with command:", readableCmd)
+    subprocess.run(command, cwd=cwd)
+
+
+def update_docker():
+    readableCmd = "docker pull projectelephant/server"
+    print(readableCmd)
+    command = shlex.split(readableCmd)
+
+    cwd = pathlib.Path(__file__).resolve().parent
+
+    subprocess.run(command, cwd=cwd)
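A minimal sketch of how these helpers are meant to be chained; all folder paths are hypothetical placeholders:

    from brats_toolkit.util.docker_functions import start_docker, stop_docker, update_docker

    update_docker()  # pull the latest projectelephant/server image
    start_docker(exam_import_folder='/data/import', exam_export_folder='/data/export', mode='cpu')
    # ... interact with the preprocessing server exposed on localhost:5000 ...
    stop_docker()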
diff --git a/brats_toolkit/util/filemanager.py b/brats_toolkit/util/filemanager.py
new file mode 100755
index 0000000..891df36
--- /dev/null
+++ b/brats_toolkit/util/filemanager.py
@@ -0,0 +1,230 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""Module containing functions to manage the BRATS files
+
+"""
+# Author: Christoph Berger
+# Script for evaluation and bulk segmentation of Brain Tumor Scans
+# using the MICCAI BRATS algorithmic repository
+#
+# Please refer to README.md and LICENSE.md for further documentation
+# This software is not certified for clinical use.
+
+import os
+import numpy as np
+import glob
+import shutil
+import fnmatch
+from . import own_itk as oitk
+
+modalities = ['fla', 't1', 't1c', 't2']
+
+def bratsNormalize(image=None, bm=None, bias=0.0001):
+    '''
+    Provides normalization of BRATS images to the
+    intensity interval 0...1 and ensures that the background is
+    entirely 0.
+    The bias value ensures that no brain voxel becomes 0.
+    '''
+    if bm is None or image is None:
+        print('[Normalize][Error] You have to pass an image and a corresponding brain mask!')
+        return None
+    if bm.shape != image.shape:
+        print('[Normalize][Error] Your image and mask dimensions don\'t match!')
+        return None
+    # set bg to zero before calculating anything
+    image = np.multiply(image, bm)
+    # shift range to 0...x
+    image = image - image.min()
+    # add bias value to avoid 0 voxels
+    image += bias
+    # adjust range to bias...1
+    image = np.divide(image, image.max())
+    # multiply with mask again to get a bg of 0
+    image = np.multiply(image, bm)
+    return image
+
+def loadGT(path, patid, file='gt.nii.gz', verbose=True):
+    """ Loads the Ground Truth for a specified patient
+    from a given Ground Truth root directory
+    In:  path, path to the GT folder
+         patid, patient ID
+         verbose: True/False terminal output
+    Out: itk image -> convert to a numpy array if needed
+    """
+    groundtruth = None
+    for directory in os.listdir(path):
+        if not os.path.isdir(os.path.join(path, directory)):
+            continue  # Not a directory
+        if patid == directory:
+            if verbose:
+                print('Loading GT for Patient', patid, 'now..')
+            patpath = os.path.join(path, patid)
+            # TODO change loading to support GT for different methods
+            groundtruth = oitk.get_itk_image(os.path.join(patpath, file))
+            break
+    return groundtruth
+
+def convertLabels(originalFile, oldlabels, newlabels=[0, 1, 2, 4]):
+    proto_img = oitk.get_itk_image(originalFile)
+    labelfile = oitk.get_itk_array(proto_img)
+    # segm_im = oitk.make_itk_image(proto_image, proto_image)
+    converted = np.zeros(labelfile.shape)
+    for oldlabel, newlabel in zip(oldlabels, newlabels):
+        converted[labelfile == oldlabel] = newlabel
+    oitk.write_itk_image(oitk.make_itk_image(converted, proto_img), originalFile)
+
+def fileFinder(srcPath, filetofind, func=convertLabels, verbose=True):
+    """ finds a file starting from the source path in subdirectories
+    and runs an arbitrary function on them
+    """
+    if verbose:
+        print(srcPath)
+        print(filetofind)
+    for filename in glob.iglob(srcPath + '/**/' + filetofind, recursive=True):
+        func(filename, [0, 1, 2, 4], [0, 2, 1, 4])
+
+def touchAndConvert(originalFile, gt, verbose=True):
+    """ Loads the ITK image and saves it with proper
+    header data (and conversion to 8-bit uint)
+    """
+    proto_img = oitk.get_itk_image(originalFile)
+    labelfile = oitk.get_itk_array(proto_img)
+    segm_img = oitk.make_itk_image(labelfile, gt)
+    oitk.write_itk_image(segm_img, originalFile)
+
+def fileIterator(directory, gt_root, verbose=True):
+    for patient in os.listdir(directory):
+        patpath = os.path.join(directory, patient)
+        if not os.path.isdir(patpath):
+            continue  # Not a directory
+        if 'brats' in patient:
+            # loads itk ground truth
+            gt = loadGT(gt_root, patient, file='gt.nii')
+            if verbose:
+                print('Current patient:', patient)
+            # loop through patient folder
+            for result in os.listdir(patpath):
+                if not os.path.isdir(os.path.join(patpath, result)):
+                    continue  # Not a directory
+                respath = os.path.join(patpath, result)
+                paths = os.listdir(respath)
+                for result in paths:
+                    # if there is a results file, run the conversion
+                    if fnmatch.fnmatch(result, '*.nii*'):
+                        if verbose:
+                            print('Will convert the following file:', result)
+                        touchAndConvert(os.path.join(respath, result), gt, True)
+
+def remove_nii(root):
+    for fn in os.listdir(root):
+        if not os.path.isdir(os.path.join(root, fn)):
+            continue  # Not a directory
+        if 'brats' in fn:
+            files = os.listdir(os.path.join(root, fn))
+            subdir = os.path.join(root, fn)
+            for file in files:
+                # remove the uncompressed modality files (e.g. t1.nii)
+                if file in [m + '.nii' for m in modalities]:
+                    os.remove(os.path.join(subdir, file))
+
+def create_files(root, gz=False):
+    # create nii.gz versions from nii for compatibility
+    print(root)
+    for fn in os.listdir(root):
+        if not os.path.isdir(os.path.join(root, fn)):
+            continue  # Not a directory
+        if 'brats' in fn:
+            # files = os.listdir(os.path.join(root, fn))
+            for file in modalities:
+                path = os.path.join(os.path.join(root, fn), file)
+                proto_image = oitk.get_itk_image(path + str('.nii'))
+                # segm_im = oitk.make_itk_image(proto_image, proto_image)
+                oitk.write_itk_image(proto_image, path + str('.nii.gz'))
+def clean(root, gz=False, dir=False):
+    """ Removes all subfolders and leaves only .nii and .nii.gz input
+    files untouched
+    root: path to folder with subfolders
+    gz: If True, compressed Nifti files are also removed
+    """
+    # Remove subfolders
+    for fn in os.listdir(root):
+        subdir = os.path.join(root, fn)
+        if not os.path.isdir(subdir):
+            continue
+        for file in os.listdir(subdir):
+            if dir and os.path.isdir(os.path.join(subdir, file)):
+                shutil.rmtree(os.path.join(subdir, file))
+            if gz and '.nii.gz' in file:
+                os.remove(os.path.join(subdir, file))
+
+def validate_files(root):
+    """ Checks if all input directories contain the right files
+    """
+    print('Looking for BRATS data directory..')
+    for fn in os.listdir(root):
+        if not os.path.isdir(os.path.join(root, fn)):
+            continue  # Not a directory
+        if 'brats' in fn:
+            print('Found pat data:', fn)
+            print('Checking data validity now')
+            files = os.listdir(os.path.join(root, fn))
+            if not set(modalities).issubset(files):
+                print('Not all required files are present!')
+                return False
+    print('File check okay!')
+    return True
+
+def rename_flair(root):
+    """ Renames flair.nii files to fla.nii if required
+    """
+    for fn in os.listdir(root):
+        if not os.path.isdir(os.path.join(root, fn)):
+            continue  # Not a directory
+        if 'brats' in fn:
+            files = os.listdir(os.path.join(root, fn))
+            subdir = os.path.join(root, fn)
+            for file in files:
+                if 'flair' in file:
+                    os.rename(os.path.join(subdir, file),
+                              os.path.join(subdir, file.replace('flair', 'fla')))
+
+def rename_fla(root):
+    """ Renames fla.nii files to flair.nii if required
+    """
+    for fn in os.listdir(root):
+        if not os.path.isdir(os.path.join(root, fn)):
+            continue  # Not a directory
+        if 'brats' in fn:
+            files = os.listdir(os.path.join(root, fn))
+            subdir = os.path.join(root, fn)
+            for file in files:
+                if 'fla.nii' == file:
+                    os.rename(os.path.join(subdir, file),
+                              os.path.join(subdir,
+                                           file.replace('fla.nii', 'flair.nii')))
+                if 'fla.nii.gz' == file:
+                    os.rename(os.path.join(subdir, file),
+                              os.path.join(subdir,
+                                           file.replace('fla.nii.gz', 'flair.nii.gz')))
+
+def reduce_filesize(root, gz=False):
+    # create nii.gz versions from nii for compatibility
+    for fn in os.listdir(root):
+        if not os.path.isdir(os.path.join(root, fn)):
+            continue  # Not a directory
+        if 'brats' in fn:
+            # files = os.listdir(os.path.join(root, fn))
+            for file in modalities:
+                path = os.path.join(os.path.join(root, fn), file)
+                proto_image = oitk.get_itk_image(path + str('.nii'))
+                # segm_im = oitk.make_itk_image(proto_image, proto_image)
+                oitk.write_itk_image(proto_image, path + str('.nii.gz'))
+
+def completeclean(root):
+    # maybe remove the root-results folder as well
+    clean(root, False, True)
+
+def conversion(segmentations, verbose=True):
+    gt_root = '/Users/christoph/Documents/Uni/Bachelorarbeit/Testdaten/testing_nii_LABELS'
+    # segmentations = '/Users/christoph/Documents/Uni/Bachelorarbeit/Testdaten/Complete_Results'
+    fileIterator(segmentations, gt_root)
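As a quick illustration of the bratsNormalize contract above, a toy example (values chosen arbitrarily):

    import numpy as np
    from brats_toolkit.util.filemanager import bratsNormalize

    img = np.array([[10., 20.], [30., 0.]])
    bm = np.array([[1, 1], [1, 0]])  # last voxel is background
    out = bratsNormalize(img, bm)
    # brain voxels now lie in (0, 1], the background stays exactly 0
    assert out[1, 1] == 0 and out.max() == 1.0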
""" + + if np.all(arr-np.asarray(arr,'uint8') == 0): + if arr.dtype != 'uint8': + if verbose: + print('Converting '+str(arr.dtype)+' to uint8 np.ndarray') + arr = np.asarray(arr, dtype='uint8') + elif np.all(arr-np.asarray(arr,'int8') == 0): + if arr.dtype != 'int8': + if verbose: + print('Converting '+str(arr.dtype)+' to int8 np.ndarray') + arr = np.asarray(arr, dtype='int8') + elif np.all(arr-np.asarray(arr,'uint16') == 0): + if arr.dtype != 'uint16': + if verbose: + print('Converting '+str(arr.dtype)+' to uint16 np.ndarray') + arr = np.asarray(arr, dtype='uint16') + elif np.all(arr-np.asarray(arr,'int16') == 0): + if arr.dtype != 'int16': + if verbose: + print('Converting '+str(arr.dtype)+' to int16 np.ndarray') + arr = np.asarray(arr, dtype='int16') + + return arr + +def make_itk_image(arr, proto_image=None, verbose=True): + """Create an itk image given an image array. + + Parameters + ---------- + arr : ndarray + Array to create an itk image with. + proto_image : itk image, optional + Proto itk image to provide Origin, Spacing and Direction. + + Returns + ------- + image : itk image + The itk image containing the input array `arr`. + + """ + + arr = reduce_arr_dtype(arr, verbose=verbose) + + image = itk.GetImageFromArray(arr) + if proto_image != None: + image.CopyInformation(proto_image) + + return image + +def write_itk_image(image, path): + """Write an itk image to a path. + + Parameters + ---------- + image : itk image or np.ndarray + Image to be written. + path : str + Path where the image should be written to. + + """ + + if isinstance(image, np.ndarray): + image = make_itk_image(image) + + writer = itk.ImageFileWriter() + writer.SetFileName(path) + + if os.path.splitext(path)[1] == '.nii': + Warning('You are converting nii, ' + \ + 'be careful with type conversions') + + writer.Execute(image) + +def get_itk_image(path_or_image): + """Get an itk image given a path. + + Parameters + ---------- + path : str or itk.Image + Path pointing to an image file with extension among + *TIFF, JPEG, PNG, BMP, DICOM, GIPL, Bio-Rad, LSM, Nifti (.nii and .nii.gz), + Analyze, SDT/SPR (Stimulate), Nrrd or VTK images*. + + Returns + ------- + image : itk image + The itk image. + + """ + if isinstance(path_or_image, itk.Image): + return path_or_image + + if not os.path.exists(path_or_image): + err = path_or_image + ' doesnt exist' + raise AttributeError(err) + + reader = itk.ImageFileReader() + reader.SetFileName(path_or_image) + + image = reader.Execute() + + return image + +def get_itk_array(path_or_image): + """ Get an image array given a path or itk image. + + Parameters + ---------- + path_or_image : str or itk image + Path pointing to an image file with extension among + *TIFF, JPEG, PNG, BMP, DICOM, GIPL, Bio-Rad, LSM, Nifti (.nii and .nii.gz), + Analyze, SDT/SPR (Stimulate), Nrrd or VTK images* or an itk image. + + Returns + ------- + arr : ndarray + Image ndarray contained in the given path or the itk image. + + """ + + if isinstance(path_or_image, np.ndarray): + return path_or_image + + elif isinstance(path_or_image, str): + image = get_itk_image(path_or_image) + + elif isinstance(path_or_image, itk.Image): + image = path_or_image + + else: + err = 'Image type not recognized: ' + str(type(path_or_image)) + raise RuntimeError(err) + + arr = itk.GetArrayFromImage(image) + + return arr + +def copy_image_info(input_path, ref_path): + """ Copy origin, direction and spacing information from ref_path + into the header in input_path. 
""" + + print('OVerwriting '+input_path[-50:]) + + ref_im = get_itk_image(ref_path) + im = get_itk_image(input_path) + + dim = im.GetSize() + if dim != ref_im.GetSize(): + err = 'Images are not of same dimension, I will not copy image info!' + raise RuntimeError(err) + + im.SetOrigin(ref_im.GetOrigin()) + im.SetDirection(ref_im.GetDirection()) + im.SetSpacing(ref_im.GetSpacing()) + + if im.GetSize() != dim: + err = 'Dimension changed during copying image info: aborting' + raise RuntimeError(err) + + write_itk_image(im, input_path) + +def load_arr_from_paths(paths): + """ For every str in paths (paths can consis of nested lists), + load the image at this path. If any str is not a path, an error + is thrown. All other objects are preserved. """ + + if isinstance(paths, str): + im_arrs = get_itk_array(paths) + elif isinstance(paths, (list, tuple)): + for i, sub_paths in enumerate(paths): + paths[i] = load_arr_from_paths(sub_paths) + im_arrs = paths + else: + im_arrs = paths + + return im_arrs + +def get_itk_data(path_or_image, verbose=False): + """Get the image array, image size and pixel dimensions given an itk + image or a path. + + Parameters + ---------- + path_or_image : str or itk image + Path pointing to an image file with extension among + *TIFF, JPEG, PNG, BMP, DICOM, GIPL, Bio-Rad, LSM, Nifti (.nii and .nii.gz), + Analyze, SDT/SPR (Stimulate), Nrrd or VTK images* or an itk image. + verbose : boolean, optional + If true, print image shape, spacing and data type of the image + corresponding to `path_or_image.` + + Returns + ------- + arr : ndarray + Image array contained in the given path or the itk image. + shape : tuple + Shape of the image array contained in the given path or the itk + image. + spacing : tuple + Pixel spacing (resolution) of the image array contained in the + given path or the itk image. + + """ + + if isinstance(path_or_image, np.ndarray): + arr = path_or_image + spacing = None + else: + if isinstance(path_or_image, str): + image = get_itk_image(path_or_image) + else: + image = path_or_image + arr = itk.GetArrayFromImage(image) + spacing = image.GetSpacing()[::-1] + + shape = arr.shape + data_type = arr.dtype + + if verbose: + + print('\t image shape: ' + str(shape)) + print('\t image spacing: ' + str(spacing)) + print('\t image data type: ' + str(data_type)) + + return arr, shape, spacing + +def read_dicom(source_path, verbose=True): + '''Reads dicom series into an itk image. + + Parameters + ---------- + source_path : string + path to directory containing dicom series. + verbose : boolean + print out all series file names. + + Returns + ------- + image : itk image + image volume. 
+    '''
+
+    reader = itk.ImageSeriesReader()
+    names = reader.GetGDCMSeriesFileNames(source_path)
+    if len(names) < 1:
+        raise IOError('No Series can be found at the specified path!')
+    elif verbose:
+        print('image series with %d dicom files found in : %s' \
+              % (len(names), source_path[-50:]))
+    reader.SetFileNames(names)
+    image = reader.Execute()
+    if verbose:
+        get_itk_data(image, verbose=True)
+
+    return image
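A minimal round-trip sketch for the own_itk helpers added above (the output path is a hypothetical placeholder):

    import numpy as np
    from brats_toolkit.util import own_itk as oitk

    arr = np.zeros((4, 4, 4))
    img = oitk.make_itk_image(arr)  # reduce_arr_dtype converts this to uint8
    oitk.write_itk_image(img, '/tmp/zeros.nii.gz')
    back = oitk.get_itk_array('/tmp/zeros.nii.gz')
    assert back.shape == (4, 4, 4)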
diff --git a/brats_toolkit/util/prep_utils.py b/brats_toolkit/util/prep_utils.py
new file mode 100644
index 0000000..44cbf60
--- /dev/null
+++ b/brats_toolkit/util/prep_utils.py
@@ -0,0 +1,14 @@
+from pathlib import Path
+import shutil
+import os
+
+
+def tempFiler(orgFilePath, modality, tempFolder):
+    # strip .nii/.nii.gz and tag the file name with its modality
+    stemName = Path(orgFilePath).stem
+    stemName = stemName.rsplit('.', 2)[0]
+    stemName = stemName + "_" + modality + ".nii.gz"
+
+    tempFile = os.path.join(tempFolder, stemName)
+    # print("tempFile:", tempFile)
+    shutil.copyfile(orgFilePath, tempFile)
+    return tempFile
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..6dd35f6
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,58 @@
+[build-system]
+requires = ["setuptools>=61.0", "setuptools_scm[toml]>=6.2"]
+
+[tool.setuptools]
+packages = ["brats_toolkit"]
+
+[tool.setuptools.dynamic]
+#version = {attr = "my_package.VERSION"}
+readme = {file = "README.md"}
+
+[tool.setuptools_scm]
+write_to = "brats_toolkit/_version.py"
+
+[project]
+name = "BraTS-Toolkit"
+dynamic = ["version"]
+authors = [
+    {name="Florian Kofler", email="florian.kofler@tum.de"},
+    {name="Christoph Berger", email="c.berger@tum.de"},
+    {name="Isra Mekki", email="isra.mekki@helmholtz-muenchen.de"},
+    {name="Mahyar Valizadeh", email="mahyar.valizadeh@helmholtz-muenchen.de"},
+]
+maintainers = [
+    {name="Florian Kofler", email="florian.kofler@tum.de"},
+    {name="Isra Mekki", email="isra.mekki@helmholtz-muenchen.de"},
+    {name="Mahyar Valizadeh", email="mahyar.valizadeh@helmholtz-muenchen.de"},
+]
+description = "TODO"
+keywords = ["todo", "todo"]
+readme = "README.md"
+# requires-python = "==3.10"
+classifiers = [
+    "Programming Language :: Python :: 3",
+    "License :: OSI Approved :: GNU Affero General Public License",
+    "Operating System :: OS Independent",
+]
+dependencies = [
+    'SimpleITK==2.1.1.2',
+    'numpy==1.22.0',
+    'python-engineio==3.14.2',
+    'python-socketio==4.6.1',
+    'requests==2.24.0'
+]
+
+[project.optional-dependencies]
+dev = [
+    "pytest",
+]
+
+[project.scripts]
+brats-segment = "brats_toolkit.cli:segmentation"
+brats-fuse = "brats_toolkit.cli:fusion"
+brats-batch-preprocess = "brats_toolkit.cli:batchpreprocess"
+brats-preprocess = "brats_toolkit.cli:singlepreprocess"
+
+[project.urls]
+# documentation = "TODO"
+repository = "https://github.com/neuronflow/BraTS-Toolkit"
\ No newline at end of file
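The [project.scripts] table above wires the console entry points, so once the package is published under the name declared here, an install and a fusion run would look roughly like this (the input and output paths are hypothetical):

    pip install BraTS-Toolkit
    brats-fuse -i ./candidate_segmentations -m mav -o ./fusion/mav.nii.gz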
diff --git a/references.bib b/references.bib
new file mode 100644
index 0000000..224ffa5
--- /dev/null
+++ b/references.bib
@@ -0,0 +1,109 @@
+%% This BibTeX bibliography file was created using BibDesk.
+%% https://bibdesk.sourceforge.io/
+
+%% Created for Christoph Berger at 2020-08-05 09:01:33 +0200
+
+
+%% Saved with string encoding Unicode (UTF-8)
+
+
+
+@inproceedings{weninger2019,
+    Abstract = {Brain tumor localization and segmentation is an important step in the treatment of brain tumor patients. It is the base for later clinical steps, e.g., a possible resection of the tumor. Hence, an automatic segmentation algorithm would be preferable, as it does not suffer from inter-rater variability. On top, results could be available immediately after the brain imaging procedure. Using this automatic tumor segmentation, it could also be possible to predict the survival of patients. The BraTS 2018 challenge consists of these two tasks: tumor segmentation in 3D-MRI images of brain tumor patients and survival prediction based on these images. For the tumor segmentation, we utilize a two-step approach: First, the tumor is located using a 3D U-net. Second, another 3D U-net -- more complex, but with a smaller output size -- detects subtle differences in the tumor volume, i.e., it segments the located tumor into tumor core, enhanced tumor, and peritumoral edema.},
+    Address = {Cham},
+    Author = {Weninger, Leon and Rippel, Oliver and Koppers, Simon and Merhof, Dorit},
+    Booktitle = {Brainlesion: Glioma, Multiple Sclerosis, Stroke and Traumatic Brain Injuries},
+    Date-Added = {2020-08-05 08:59:50 +0200},
+    Date-Modified = {2020-08-05 09:01:00 +0200},
+    Editor = {Crimi, Alessandro and Bakas, Spyridon and Kuijf, Hugo and Keyvan, Farahani and Reyes, Mauricio and van Walsum, Theo},
+    Isbn = {978-3-030-11726-9},
+    Pages = {3--12},
+    Publisher = {Springer International Publishing},
+    Title = {Segmentation of Brain Tumors and Patient Survival Prediction: Methods for the BraTS 2018 Challenge},
+    Year = {2019}}
+
+@inproceedings{nuechterlein2019,
+    Abstract = {Automatic quantitative analysis of structural magnetic resonance (MR) images of brain tumors is critical to the clinical care of glioma patients, and for the future of advanced MR imaging research. In particular, automatic brain tumor segmentation can provide volumes of interest (VOIs) to scale the analysis of advanced MR imaging modalities such as perfusion-weighted imaging (PWI), diffusion-weighted imaging (DTI), and MR spectroscopy (MRS), which is currently hindered by the prohibitive cost and time of manual segmentations. However, automatic brain tumor segmentation is complicated by the high heterogeneity and dimensionality of MR data, and the relatively small size of available datasets. This paper extends ESPNet, a fast and efficient network designed for vanilla 2D semantic segmentation, to challenging 3D data in the medical imaging domain [11]. Even without substantive pre- and post-processing, our model achieves respectable brain tumor segmentation results, while learning only 3.8 million parameters. 3D-ESPNet achieves dice scores of 0.850, 0.665, and 0.782 on whole tumor, enhancing tumor, and tumor core classes on the test set of the 2018 BraTS challenge [1--4, 12]. Our source code is open-source and available at https://github.com/sacmehta/3D-ESPNet.},
+    Address = {Cham},
+    Author = {Nuechterlein, Nicholas and Mehta, Sachin},
+    Booktitle = {Brainlesion: Glioma, Multiple Sclerosis, Stroke and Traumatic Brain Injuries},
+    Date-Added = {2020-08-05 08:59:42 +0200},
+    Date-Modified = {2020-08-05 09:00:27 +0200},
+    Editor = {Crimi, Alessandro and Bakas, Spyridon and Kuijf, Hugo and Keyvan, Farahani and Reyes, Mauricio and van Walsum, Theo},
+    Isbn = {978-3-030-11726-9},
+    Pages = {245--253},
+    Publisher = {Springer International Publishing},
+    Title = {3D-ESPNet with Pyramidal Refinement for Volumetric Brain Tumor Image Segmentation},
+    Year = {2019}}
+@inproceedings{zhao2019,
+    Abstract = {Despite remarkable progress, 3D whole brain segmentation of structural magnetic resonance imaging (MRI) into a large number of regions (>100) is still difficult due to the lack of annotated data and the limitation of GPU memory. To address these challenges, we propose a semi-supervised segmentation method based on deep neural networks to exploit the plenty of unlabeled data by extending the self-training method, and improve the U-Net model by designing a novel self-ensemble architecture and a random patch-size training strategy. Further, to reduce the model storage and computational cost, we get a compact model by knowledge distillation. Extensive experiments conducted on the MICCAI 2012 dataset demonstrate that our method dramatically outperforms previous methods and has achieved the state-of-the-art performance. Our compact model segments an MRI image within 3 s on a TITAN X GPU, which is much faster than multi-atlas based methods and previous deep learning methods.},
+    Address = {Cham},
+    Author = {Zhao, Yuan-Xing and Zhang, Yan-Ming and Song, Ming and Liu, Cheng-Lin},
+    Booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2019},
+    Date-Added = {2020-08-05 08:59:38 +0200},
+    Date-Modified = {2020-08-05 09:00:49 +0200},
+    Editor = {Shen, Dinggang and Liu, Tianming and Peters, Terry M. and Staib, Lawrence H. and Essert, Caroline and Zhou, Sean and Yap, Pew-Thian and Khan, Ali},
+    Isbn = {978-3-030-32248-9},
+    Pages = {256--265},
+    Publisher = {Springer International Publishing},
+    Title = {Multi-view Semi-supervised 3D Whole Brain Segmentation with a Self-ensemble Network},
+    Year = {2019}}
+
+@inproceedings{mckinley2020triplanar,
+    Abstract = {We introduce a modification of our previous 3D-to-2D fully convolutional architecture, DeepSCAN, replacing batch normalization with instance normalization, and adding a lightweight local attention mechanism. These networks are trained using a previously described loss function which models label noise and uncertainty. We present results on the validation dataset of the Multimodal Brain Tumor Segmentation Challenge 2019.},
+    Address = {Cham},
+    Author = {McKinley, Richard and Rebsamen, Michael and Meier, Raphael and Wiest, Roland},
+    Booktitle = {Brainlesion: Glioma, Multiple Sclerosis, Stroke and Traumatic Brain Injuries},
+    Date-Added = {2020-08-05 08:59:34 +0200},
+    Date-Modified = {2020-08-05 09:01:14 +0200},
+    Editor = {Crimi, Alessandro and Bakas, Spyridon},
+    Isbn = {978-3-030-46640-4},
+    Pages = {379--387},
+    Publisher = {Springer International Publishing},
+    Title = {Triplanar Ensemble of 3D-to-2D CNNs with Label-Uncertainty for Brain Tumor Segmentation},
+    Year = {2020}}
+
+@inproceedings{isensee2018no,
+    Author = {Isensee, Fabian and Kickingereder, Philipp and Wick, Wolfgang and Bendszus, Martin and Maier-Hein, Klaus H},
+    Booktitle = {International MICCAI Brainlesion Workshop},
+    Date-Added = {2020-08-05 08:52:02 +0200},
+    Date-Modified = {2020-08-05 08:52:02 +0200},
+    Organization = {Springer},
+    Pages = {234--244},
+    Title = {No new-net},
+    Year = {2018}}
+
+@inproceedings{mckinley2018ensembles,
+    Author = {McKinley, Richard and Meier, Raphael and Wiest, Roland},
+    Booktitle = {International MICCAI Brainlesion Workshop},
+    Date-Added = {2020-08-05 08:51:36 +0200},
+    Date-Modified = {2020-08-05 08:51:36 +0200},
+    Organization = {Springer},
+    Pages = {456--465},
+    Title = {Ensembles of densely-connected CNNs with label-uncertainty for brain tumor segmentation},
+    Year = {2018}}
+@inproceedings{feng2019,
+    Abstract = {Accurate segmentation of different sub-regions of gliomas including peritumoral edema, necrotic core, enhancing and non-enhancing tumor core from multimodal MRI scans has important clinical relevance in diagnosis, prognosis and treatment of brain tumors. However, due to the highly heterogeneous appearance and shape, segmentation of the sub-regions is very challenging. Recent development using deep learning models has proved its effectiveness in the past several brain segmentation challenges as well as other semantic and medical image segmentation problems. Most models in brain tumor segmentation use a 2D/3D patch to predict the class label for the center voxel and variant patch sizes and scales are used to improve the model performance. However, it has low computation efficiency and also has limited receptive field. U-Net is a widely used network structure for end-to-end segmentation and can be used on the entire image or extracted patches to provide classification labels over the entire input voxels so that it is more efficient and expect to yield better performance with larger input size. Furthermore, instead of picking the best network structure, an ensemble of multiple models, trained on different dataset or different hyper-parameters, can generally improve the segmentation performance. In this study we propose to use an ensemble of 3D U-Nets with different hyper-parameters for brain tumor segmentation. Preliminary results showed effectiveness of this model. In addition, we developed a linear model for survival prediction using extracted imaging and non-imaging features, which, despite the simplicity, can effectively reduce overfitting and regression errors.},
+    Address = {Cham},
+    Author = {Feng, Xue and Tustison, Nicholas and Meyer, Craig},
+    Booktitle = {Brainlesion: Glioma, Multiple Sclerosis, Stroke and Traumatic Brain Injuries},
+    Date-Added = {2020-08-05 08:51:31 +0200},
+    Date-Modified = {2020-08-05 09:00:41 +0200},
+    Editor = {Crimi, Alessandro and Bakas, Spyridon and Kuijf, Hugo and Keyvan, Farahani and Reyes, Mauricio and van Walsum, Theo},
+    Isbn = {978-3-030-11726-9},
+    Pages = {279--288},
+    Publisher = {Springer International Publishing},
+    Title = {Brain Tumor Segmentation Using an Ensemble of 3D U-Nets and Overall Survival Prediction Using Radiomic Features},
+    Year = {2019}}
+
+@inproceedings{marcinkiewicz2018segmenting,
+    Author = {Marcinkiewicz, Michal and Nalepa, Jakub and Lorenzo, Pablo Ribalta and Dudzik, Wojciech and Mrukwa, Grzegorz},
+    Booktitle = {International MICCAI Brainlesion Workshop},
+    Date-Added = {2020-08-05 08:39:37 +0200},
+    Date-Modified = {2020-08-05 08:39:37 +0200},
+    Organization = {Springer},
+    Pages = {13--24},
+    Title = {Segmenting brain tumors from MRI using cascaded multi-modal U-Nets},
+    Year = {2018}}

From de5ca95f3365d04575adce5ab88a3282e32fc5c0 Mon Sep 17 00:00:00 2001
From: Isra Mekki <98828872+IsraMekki0@users.noreply.github.com>
Date: Fri, 27 Jan 2023 12:50:38 +0100
Subject: [PATCH 02/10] Delete src directory

---
 src/brats-toolkit | 1 -
 1 file changed, 1 deletion(-)
 delete mode 160000 src/brats-toolkit

diff --git a/src/brats-toolkit b/src/brats-toolkit
deleted file mode 160000
index b7d8d94..0000000
--- a/src/brats-toolkit
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit b7d8d94322802779f33ba76261baeac77e68091f

From 6abcc5a2aa301d3df32297ddea39aba681deff2d Mon Sep 17 00:00:00 2001
From: Isra Mekki <98828872+IsraMekki0@users.noreply.github.com>
Date: Fri, 27 Jan 2023 14:06:38 +0100
Subject: [PATCH 03/10] Create release.yml

---
 .github/workflows/release.yml | 31 +++++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)
 create mode 100644 .github/workflows/release.yml

diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 0000000..40cbeeb
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,31 @@
+name: release
+
+on:
+  release:
+    types: [created]
+  workflow_dispatch:
+
+jobs:
+  publish:
+    needs: test
+    name: Publish to test PyPI
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout Repository
+        uses: actions/checkout@v3
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.x'
+      - name: Install dependencies
+        run: |
+          pip install build
+          pip install twine
+      - name: Build package
+        run: python -m build
+      - name: Upload to PyPI
+        env:
+          TWINE_USERNAME: __token__
+          TWINE_PASSWORD: ${{ secrets.PYPI_TEST_API_TOKEN }}
+        run: |
+          twine upload --repository-url=https://test.pypi.org/legacy/ dist/*

From 9841389f5e568afa8867f08419083d2f65bcf43f Mon Sep 17 00:00:00 2001
From: Isra Mekki <98828872+IsraMekki0@users.noreply.github.com>
Date: Fri, 27 Jan 2023 14:09:14 +0100
Subject: [PATCH 04/10] Update release.yml

---
 .github/workflows/release.yml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 40cbeeb..8ab0c18 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -7,7 +7,6 @@ on:
 
 jobs:
   publish:
-    needs: test
     name: Publish to test PyPI
     runs-on: ubuntu-latest
     steps:

From b179b40d609c8077bfb0d1e36787df218d936f58 Mon Sep 17 00:00:00 2001
From: Isra Mekki <98828872+IsraMekki0@users.noreply.github.com>
Date: Fri, 27 Jan 2023 14:13:53 +0100
Subject: [PATCH 05/10] Delete _version.py

---
 brats_toolkit/_version.py | 4 ----
 1 file changed, 4 deletions(-)
 delete mode 100644 brats_toolkit/_version.py

diff --git a/brats_toolkit/_version.py b/brats_toolkit/_version.py
deleted file mode 100644
index 2337850..0000000
--- a/brats_toolkit/_version.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# file generated by setuptools_scm
-# don't change, don't track in version control
-__version__ = version = '0.1.dev37+gbee34b1.d20230127'
-__version_tuple__ = version_tuple = (0, 1, 'dev37', 'gbee34b1.d20230127')

From fd80a4c02132cd4878ee92650f4523eb08e1bd91 Mon Sep 17 00:00:00 2001
From: Isra Mekki <98828872+IsraMekki0@users.noreply.github.com>
Date: Fri, 27 Jan 2023 14:17:26 +0100
Subject: [PATCH 06/10] Corrected license

---
 pyproject.toml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 6dd35f6..38c2ab8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -31,7 +31,7 @@ readme = "README.md"
 # requires-python = "==3.10"
 classifiers = [
     "Programming Language :: Python :: 3",
-    "License :: OSI Approved :: GNU Affero General Public License",
+    "License :: OSI Approved :: GNU Affero General Public License v3",
     "Operating System :: OS Independent",
 ]
 dependencies = [
@@ -55,4 +55,4 @@ brats-preprocess = "brats_toolkit.cli:singlepreprocess"
 
 [project.urls]
 # documentation = "TODO"
-repository = "https://github.com/neuronflow/BraTS-Toolkit"
\ No newline at end of file
+repository = "https://github.com/neuronflow/BraTS-Toolkit"

From 3563000a29890056a38776d9b19f8d0be0add921 Mon Sep 17 00:00:00 2001
From: neuronflow <7048826+neuronflow@users.noreply.github.com>
Date: Sat, 28 Jan 2023 09:24:02 +0100
Subject: [PATCH 07/10] Update pyproject.toml keywords

---
 pyproject.toml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 38c2ab8..d42c37a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -25,8 +25,8 @@ maintainers = [
     {name="Isra Mekki", email="isra.mekki@helmholtz-muenchen.de"},
     {name="Mahyar Valizadeh", email="mahyar.valizadeh@helmholtz-muenchen.de"},
 ]
-description = "TODO"
-keywords = ["todo", "todo"]
+description = "BraTS Toolkit is a holistic approach to brain tumor segmentation and consists of three components: First, the BraTS Preprocessor facilitates data standardization and preprocessing for researchers and clinicians alike. It covers the entire image analysis workflow prior to tumor segmentation, from image conversion and registration to brain extraction. Second, BraTS Segmentor enables orchestration of BraTS brain tumor segmentation algorithms for generation of fully-automated segmentations. Finally, BraTS Fusionator can combine the resulting candidate segmentations into consensus segmentations using fusion methods such as majority voting and iterative SIMPLE fusion."
+keywords = ["brain tumor", "glioma", "BraTS", "segmentation", "fusion", "skullstripping", "brain extraction"] readme = "README.md" # requires-python = "==3.10" classifiers = [ From 3c12182adb8dea7893ce9333bc4770fcb64dd44c Mon Sep 17 00:00:00 2001 From: neuronflow <7048826+neuronflow@users.noreply.github.com> Date: Tue, 31 Jan 2023 10:49:29 +0100 Subject: [PATCH 08/10] Update pyproject.toml shorter desc --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index d42c37a..0a441da 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,7 +25,7 @@ maintainers = [ {name="Isra Mekki", email="isra.mekki@helmholtz-muenchen.de"}, {name="Mahyar Valizadeh", email="mahyar.valizadeh@helmholtz-muenchen.de"}, ] -description = "BraTS Toolkit is a holistic approach to brain tumor segmentation and consists of three components: First, the BraTS Preprocessor facilitates data standardization and preprocessing for researchers and clinicians alike. It covers the entire image analysis workflow prior to tumor segmentation, from image conversion and registration to brain extraction. Second, BraTS Segmentor enables orchestration of BraTS brain tumor segmentation algorithms for generation of fully-automated segmentations. Finally, Brats Fusionator can combine the resulting candidate segmentations into consensus segmentations using fusion methods such as majority voting and iterative SIMPLE fusion." +description = "BraTS Toolkit is a holistic approach to brain tumor segmentation allowing to build modular pipeliens for preprocessing, segmentation and fusion of segmentations." keywords = ["brain tumor", "glioma", "BraTS", "segmentation", "fusion", "skullstripping", "brain extraction"] readme = "README.md" # requires-python = "==3.10" From 10627f236b274d08a203218fda6ffa5addb0b1b2 Mon Sep 17 00:00:00 2001 From: Isra Mekki <98828872+IsraMekki0@users.noreply.github.com> Date: Tue, 31 Jan 2023 11:04:02 +0100 Subject: [PATCH 09/10] Update release.yml --- .github/workflows/release.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 8ab0c18..bca21a7 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -25,6 +25,6 @@ jobs: - name: Upload to PyPI env: TWINE_USERNAME: __token__ - TWINE_PASSWORD: ${{ secrets.PYPI_TEST_API_TOKEN }} + TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }} run: | - twine upload --repository-url=https://test.pypi.org/legacy/ dist/* + twine upload dist/*.whl From ef406dc529d7d87f3a4ebdd11d363a8af491a775 Mon Sep 17 00:00:00 2001 From: Isra Mekki <98828872+IsraMekki0@users.noreply.github.com> Date: Tue, 31 Jan 2023 11:21:18 +0100 Subject: [PATCH 10/10] Delete requirements.txt --- requirements.txt | 1 - 1 file changed, 1 deletion(-) delete mode 100644 requirements.txt diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 6e08d9d..0000000 --- a/requirements.txt +++ /dev/null @@ -1 +0,0 @@ --e git+https://github.com/neuronflow/BraTS-Toolkit-Source.git@master#egg=brats_toolkit