diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..b3ae8ba
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,20 @@
+language: python
+
+sudo: required
+
+services:
+- docker
+
+python:
+- 2.7
+
+install: true
+
+before_script:
+- docker build -t simtool .
+
+script:
+- docker run --privileged --rm=false -v /var/run/docker.sock:/var/run/docker.sock -v $PWD:$PWD -w $PWD/sim simtool ./tests/execute_pytest.sh
+
+after_success:
+- coveralls
diff --git a/Dockerfile b/Dockerfile
index 9f4d50c..f7b5bd6 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,11 +1,25 @@
-FROM mesosphere/spark:2.0.0-2.2.0-1-hadoop-2.6
+FROM jupyter/pyspark-notebook:82b978b3ceeb
 
-RUN apt-get -y install docker.io \
-    python-setuptools && \
-    easy_install pip
+USER root
+# Install the Docker CLI so the container can drive the host's Docker
+# daemon through the mounted /var/run/docker.sock (see .travis.yml).
+RUN apt-get update && \
+    apt-get install -y apt-utils curl && \
+    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
+    apt-get install -y --reinstall base-files lsb-release lsb-base && \
+    apt-get install -y software-properties-common apt-transport-https && \
+    add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" && \
+    apt-get update && \
+    apt-get install -y docker-ce
 
-RUN pip install boutiques pytest pyspark pybids
+RUN conda create -n simenv python=2.7 pytest py4j==0.10.4 pyspark pytest-cov
 
-ENTRYPOINT ["pytest"]
+# Putting the environment's bin directory first on PATH makes "source activate
+# simenv" unnecessary (a RUN "source activate" would not persist across layers).
+ENV PATH /opt/conda/envs/simenv/bin:$PATH
+
+ENV PYTHONPATH /opt/conda/envs/python2/lib/python2.7/site-packages:/usr/local/spark-2.2.0-bin-hadoop2.7/python:/opt/conda/envs/python2/bin:$PYTHONPATH
+
+RUN pip install boutiques pybids duecredit nipype
+
+ENTRYPOINT ["/bin/bash"]
diff --git a/README.md b/README.md
index 45f38ca..ca1c7d0 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,6 @@
-[![CircleCI](https://circleci.com/gh/big-data-lab-team/sim/tree/master.svg?style=svg)](https://circleci.com/gh/big-data-lab-team/sim/tree/master)
+[![PyPI](https://img.shields.io/pypi/v/simtools.svg)](https://pypi.python.org/pypi/simtools)
+[![Build Status](https://travis-ci.org/big-data-lab-team/sim.svg?branch=master)](https://travis-ci.org/big-data-lab-team/sim)
+[![Coverage Status](https://coveralls.io/repos/github/big-data-lab-team/sim/badge.svg?branch=master)](https://coveralls.io/github/big-data-lab-team/sim?branch=master)
 
 # Spark for neuroIMaging (sim)
diff --git a/circle.yml b/circle.yml
deleted file mode 100644
index bee873f..0000000
--- a/circle.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-machine:
-  pre:
-    - sudo curl -L -o /usr/bin/docker 'https://s3-external-1.amazonaws.com/circle-downloads/docker-1.9.1-circleci'
-    - sudo chmod 0755 /usr/bin/docker
-
-  services:
-    - docker
-    - privileged: true
-
-dependencies:
-  cache_directories:
-    - "~/docker"
-
-  override:
-    - docker build -t simtool .:
-        timeout: 21600
-    - mkdir -p ~/docker; docker save "simtool" > ~/docker/image.tar
-
-test:
-  override:
-    - docker run --privileged --rm=false -v /var/run/docker.sock:/var/run/docker.sock -v $PWD:$PWD -w $PWD simtool ./test
diff --git a/setup.cfg b/setup.cfg
index b88034e..0115b61 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,2 +1,4 @@
 [metadata]
 description-file = README.md
+[aliases]
+test=pytest
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 527b79a..b88df27 100644
--- a/setup.py
+++ b/setup.py
@@ -4,7 +4,8 @@
 DEPS = [
     "boutiques",
     "pybids",
-    "pyspark"
+    "pyspark",
+    "pytest-runner"
 ]
 
 setup(name="simtools",
@@ -16,8 +17,8 @@
       license="GPL3.0",
       packages=["sim"],
       include_package_data=True,
-      test_suite="nose.collector",
-      tests_require=["nose"],
+      test_suite="pytest",
+      tests_require=["pytest"],
       setup_requires=DEPS,
       install_requires=DEPS,
       entry_points = {
diff --git a/sim/Sim.py b/sim/Sim.py
new file mode 100644
index 0000000..239426b
--- /dev/null
+++ b/sim/Sim.py
@@ -0,0 +1,66 @@
+import boutiques, os, time, errno, tarfile, json
+
+class Sim(object):
+
+    def __init__(self, boutiques_descriptor, input_path, output_dir):
+
+        self.boutiques_descriptor = boutiques_descriptor
+        self.input_path = input_path
+        self.output_dir = output_dir
+
+    def create_tar_file(self, out_dir, tar_name, files):
+        try:
+            os.makedirs(out_dir)
+        except OSError as e:
+            if e.errno != errno.EEXIST:
+                raise
+        with tarfile.open(os.path.join(out_dir, tar_name), "w") as tar:
+            for f in files:
+                tar.add(f)
+
+    def write_invocation_file(self, invocation, invocation_file):
+
+        # Note: the invocation file format will change soon
+        json_invocation = json.dumps(invocation)
+
+        # Writes invocation
+        with open(invocation_file, "w") as f:
+            f.write(json_invocation)
+
+    def bosh_exec(self, invocation_file, mount=None):
+        try:
+            if mount is None:
+                boutiques.execute("launch", self.boutiques_descriptor, invocation_file, "-x")
+            else:
+                boutiques.execute("launch", self.boutiques_descriptor, invocation_file, "-v",
+                                  "{0}:{0}".format(mount), "-x")
+            result = 0
+        except SystemExit as e:
+            result = e.code
+        return (result, "Empty log, Boutiques API doesn't return it yet.\n")
+
+    def pretty_print(self, result):
+        (label, (returncode, log)) = result
+        status = "SUCCESS" if returncode == 0 else "ERROR"
+        timestamp = str(int(time.time() * 1000))
+        filename = "{0}.{1}.log".format(timestamp, label)
+        with open(filename, "w") as f:
+            f.write(log)
+        print(" [ {0} ({1}) ] {2} - {3}".format(status, returncode, label, filename))
+
+    def check_failure(self, result):
+        (label, (returncode, log)) = result
+        return returncode != 0
+
+    def write_BIDS_invocation(self, analysis_level, participant_label, invocation_file):
+
+        invocation = {}
+        invocation["bids_dir"] = self.input_path
+        invocation["output_dir_name"] = self.output_dir
+        if analysis_level == "participant":
+            invocation["analysis_level"] = "participant"
+            invocation["participant_label"] = participant_label
+        elif analysis_level == "group":
+            invocation["analysis_level"] = "group"
+
+        self.write_invocation_file(invocation, invocation_file)
diff --git a/sim/SparkBIDS.py b/sim/SparkBIDS.py
old mode 100644
new mode 100755
index 607cd42..719eb0c
--- a/sim/SparkBIDS.py
+++ b/sim/SparkBIDS.py
@@ -1,13 +1,13 @@
 from bids.grabbids import BIDSLayout
-import json, os, errno, subprocess, time, tarfile, shutil
+import boutiques, errno, json, os, shutil, subprocess, tarfile, time
+from Sim import Sim
 
-class SparkBIDS(object):
+class SparkBIDS(Sim):
 
     def __init__(self, boutiques_descriptor, bids_dataset, output_dir, options={}):
 
-        self.boutiques_descriptor = os.path.join(os.path.abspath(boutiques_descriptor))
-        self.bids_dataset = bids_dataset
-        self.output_dir = output_dir
+        super(SparkBIDS, self).__init__(os.path.abspath(boutiques_descriptor), bids_dataset, output_dir)
+
         # Includes: use_hdfs, skip_participant_analysis,
         # skip_group_analysis, skip_participants_file
@@ -42,6 +42,11 @@ def run(self, sc):
 
             for result in mapped.collect():
                 self.pretty_print(result)
+                if self.check_failure(result):
+                    # Disable group analysis if a participant analysis failed
+                    self.do_group_analysis = False
+                    print("ERROR# Participant analysis failed. Group analysis will be aborted.")
+
         # Group analysis
         if self.do_group_analysis:
@@ -66,7 +71,7 @@ def create_RDD(self, sc):
 
         sub_dir="tar_files"
 
-        layout = BIDSLayout(self.bids_dataset)
+        layout = BIDSLayout(self.input_path)
         participants = layout.get_subjects()
 
         # Create RDD of file paths as key and tarred subject data as value
@@ -84,46 +89,6 @@ def create_RDD(self, sc):
 
         return sc.parallelize(list_participants)
 
-    def create_tar_file(self, out_dir, tar_name, files):
-        try:
-            os.makedirs(out_dir)
-        except OSError as e:
-            if e.errno != errno.EEXIST:
-                raise
-        with tarfile.open(os.path.join(out_dir, tar_name), "w") as tar:
-            for f in files:
-                tar.add(f)
-
-    def pretty_print(self, result):
-        (label, (log, returncode)) = result
-        status = "SUCCESS" if returncode == 0 else "ERROR"
-        timestamp = str(int(time.time() * 1000))
-        filename = "{0}.{1}.log".format(timestamp, label)
-        with open(filename,"w") as f:
-            f.write(log)
-        print(" [ {3} ({0}) ] {1} - {2}".format(returncode, label, filename, status))
-
-    def write_invocation_file(self, analysis_level, participant_label, invocation_file):
-
-        # Note: the invocation file format will change soon
-
-        # Creates invocation object
-        invocation = {}
-        invocation["inputs"] = [ ]
-        invocation["inputs"].append({"bids_dir": self.bids_dataset})
-        invocation["inputs"].append({"output_dir_name": self.output_dir})
-        if analysis_level == "participant":
-            invocation["inputs"].append({"analysis_level": "participant"})
-            invocation["inputs"].append({"participant_label": participant_label})
-        elif analysis_level == "group":
-            invocation["inputs"].append({"analysis_level": "group"})
-
-        json_invocation = json.dumps(invocation)
-
-        # Writes invocation
-        with open(invocation_file,"w") as f:
-            f.write(json_invocation)
-
     def get_bids_dataset(self, data, participant_label):
 
         filename = 'sub-{0}.tar'.format(participant_label)
@@ -141,7 +106,7 @@ def get_bids_dataset(self, data, participant_label):
 
         os.remove(filename)
 
-        return os.path.join(tmp_dataset, os.path.abspath(self.bids_dataset))
+        return os.path.join(tmp_dataset, os.path.abspath(self.input_path))
 
     def run_participant_analysis(self, participant_label, data):
 
@@ -158,42 +123,23 @@ def run_participant_analysis(self, participant_label, data):
             raise
 
         invocation_file = "./invocation-{0}.json".format(participant_label)
-        self.write_invocation_file("participant",
+        self.write_BIDS_invocation("participant",
                                    participant_label,
                                   invocation_file)
 
-        exec_result = self.bosh_exec(invocation_file)
+        exec_result = self.bosh_exec(invocation_file, os.path.dirname(os.path.abspath(self.input_path)))
         os.remove(invocation_file)
         return (participant_label, exec_result)
 
     def run_group_analysis(self):
         invocation_file = "./invocation-group.json"
-        self.write_invocation_file("group",
+        self.write_BIDS_invocation("group",
                                    None,
                                    invocation_file)
-        exec_result = self.bosh_exec(invocation_file)
+        exec_result = self.bosh_exec(invocation_file, os.path.dirname(os.path.abspath(self.input_path)))
         os.remove(invocation_file)
         return ("group", exec_result)
 
-    def bosh_exec(self, invocation_file):
-        run_command = "bosh {0} -i {1} -e -d".format(self.boutiques_descriptor, invocation_file)
-        result = None
-        try:
-            log = subprocess.check_output(run_command, shell=True, stderr=subprocess.STDOUT)
-            result = (log, 0)
-        except subprocess.CalledProcessError as e:
-            result = (e.output, e.returncode)
-        try:
-            shutil.rmtree(label)
-        except:
-            pass
-        return result
-
-    def is_valid_file(parser, arg):
-        if not os.path.exists(arg):
-            parser.error("The file %s does not exist!" % arg)
-        else:
-            return open(arg, 'r')
-
     def get_participant_from_fn(self,filename):
         if filename.endswith(".tar"): return filename.split('-')[-1][:-4]
diff --git a/sim/other_wf_examples/nipype/NipBIDS.py b/sim/other_wf_examples/nipype/NipBIDS.py
new file mode 100644
index 0000000..2206208
--- /dev/null
+++ b/sim/other_wf_examples/nipype/NipBIDS.py
@@ -0,0 +1,128 @@
+from nipype import Workflow, MapNode, Node
+from nipype.interfaces.utility import Function
+import os, json, time
+from sim import Sim
+
+class NipBIDS(Sim.Sim):
+
+    def __init__(self, boutiques_descriptor, bids_dataset, output_dir, options={}):
+
+        super(NipBIDS, self).__init__(os.path.abspath(boutiques_descriptor),
+                                      os.path.abspath(bids_dataset),
+                                      os.path.abspath(output_dir))
+
+        # Includes: skip_participant_analysis,
+        # skip_group_analysis, skip_participants_file
+        for option in list(options.keys()): setattr(self, option, options.get(option))
+
+        # Check what will have to be done
+        self.do_participant_analysis = self.supports_analysis_level("participant") \
+                                       and not self.skip_participant_analysis
+        self.do_group_analysis = self.supports_analysis_level("group") \
+                                 and not self.skip_group_analysis
+        self.skipped_participants = open(self.skip_participants_file, "r").read().split() if self.skip_participants_file else []
+
+        # Print analysis summary
+        print("Computed Analyses: Participant [ {0} ] - Group [ {1} ]".format(str(self.do_participant_analysis).upper(),
+                                                                              str(self.do_group_analysis).upper()))
+
+        if len(self.skipped_participants):
+            print("Skipped participants: {0}".format(self.skipped_participants))
+
+    def run(self):
+
+        wf = Workflow('bapp')
+        wf.base_dir = os.getcwd()
+
+        # Stays None when participant analysis is skipped, so that
+        # group analysis can still be executed on its own
+        p_analysis = None
+
+        # Participant analysis
+        if self.do_participant_analysis:
+
+            participants = Node(Function(input_names=['nip'],
+                                         output_names=['out'],
+                                         function=get_participants),
+                                name='get_participants')
+            participants.inputs.nip = self
+
+            p_analysis = MapNode(Function(input_names=['nip', 'analysis_level',
+                                                       'participant_label', 'working_dir'],
+                                          output_names=['result'],
+                                          function=run_analysis),
+                                 iterfield=['participant_label'],
+                                 name='run_participant_analysis')
+
+            wf.add_nodes([participants])
+            wf.connect(participants, 'out', p_analysis, 'participant_label')
+
+            p_analysis.inputs.analysis_level = 'participant'
+            p_analysis.inputs.nip = self
+            p_analysis.inputs.working_dir = os.getcwd()
+
+        # Group analysis
+        if self.do_group_analysis:
+            groups = Node(Function(input_names=['nip', 'analysis_level',
+                                                'working_dir', 'dummy_token'],
+                                   output_names=['g_result'],
+                                   function=run_analysis),
+                          name='run_group_analysis')
+
+            groups.inputs.analysis_level = 'group'
+            groups.inputs.nip = self
+            groups.inputs.working_dir = os.getcwd()
+
+            if p_analysis is not None:
+                wf.connect(p_analysis, 'result', groups, 'dummy_token')
+            else:
+                wf.add_nodes([groups])
+
+        eg = wf.run()
+
+        # Convert to dictionary to more easily extract results
+        node_names = [i.name for i in eg.nodes()]
+        result_dict = dict(zip(node_names, eg.nodes()))
+
+        if self.do_participant_analysis:
+            for res in result_dict['run_participant_analysis'].result.outputs.get('result'):
+                self.pretty_print(res)
+
+        if self.do_group_analysis:
+            self.pretty_print(result_dict['run_group_analysis'].result.outputs.g_result)
+
+    def supports_analysis_level(self, level):
+        desc = json.load(open(self.boutiques_descriptor))
+        analysis_level_input = None
+        for input in desc["inputs"]:
+            if input["id"] == "analysis_level":
+                analysis_level_input = input
+                break
+        assert(analysis_level_input), "BIDS app descriptor has no input with id 'analysis_level'"
+        assert(analysis_level_input.get("value-choices")), "Input 'analysis_level' of BIDS app descriptor has no 'value-choices' property"
+        return level in analysis_level_input["value-choices"]
+
+def run_analysis(nip, analysis_level, working_dir, participant_label=None, dummy_token=None):
+    import os
+
+    if analysis_level == "group":
+        invocation_file = "./invocation-group.json"
+        out_key = "group"
+    else:
+        invocation_file = "./invocation-{0}.json".format(participant_label)
+        out_key = participant_label
+
+    nip.write_BIDS_invocation(analysis_level, participant_label, invocation_file)
+    exec_result = nip.bosh_exec(invocation_file, working_dir)
+    os.remove(invocation_file)
+
+    return (out_key, exec_result)
+
+def get_participants(nip):
+
+    from bids.grabbids import BIDSLayout
+
+    layout = BIDSLayout(nip.input_path)
+    participants = layout.get_subjects()
+
+    return list(set(participants) - set(nip.skipped_participants))
diff --git a/sim/other_wf_examples/nipype/README.md b/sim/other_wf_examples/nipype/README.md
new file mode 100644
index 0000000..a5b0e40
--- /dev/null
+++ b/sim/other_wf_examples/nipype/README.md
@@ -0,0 +1,29 @@
+# NipBIDS
+
+An example Nipype workflow that executes sim functions, demonstrating how SparkBIDS can be rewritten in Nipype.
+
+## Additional dependencies
+* `pip install nipype`
+
+## Demo
+```
+nip_bids ./sim/tests/demo/bids-app-example.json ./sim/tests/demo/ds001 output
+```
+
+It should produce output similar to:
+```
+Computed Analyses: Participant [ TRUE ] - Group [ TRUE ]
+ [ SUCCESS ] sub-01
+ [ SUCCESS ] sub-02
+ [ SUCCESS ] group
+```
+
+A directory named `output` should also be created with the following content:
+```
+avg_brain_size.txt sub-01_brain.nii.gz sub-02_brain.nii.gz
+```
+
+The content of `avg_brain_size.txt` should be:
+```
+Average brain size is 830532 voxels
+```
diff --git a/sim/other_wf_examples/nipype/nip_bids.py b/sim/other_wf_examples/nipype/nip_bids.py
new file mode 100755
index 0000000..da3bbdc
--- /dev/null
+++ b/sim/other_wf_examples/nipype/nip_bids.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+
+from NipBIDS import NipBIDS
+import argparse, os
+
+def is_valid_file(parser, arg):
+    if not os.path.exists(arg):
+        parser.error("The file %s does not exist!" % arg)
+    else:
+        return arg
+
+def main():
+
+    # Arguments parsing
+    parser = argparse.ArgumentParser()
+
+    # Required inputs
+    parser.add_argument("bids_app_boutiques_descriptor", help="Boutiques descriptor of the BIDS App that will process the dataset.")
+    parser.add_argument("bids_dataset", help="BIDS dataset to be processed.")
+    parser.add_argument("output_dir", help="Output directory.")
+
+    # Optional inputs
+    parser.add_argument("--skip-participant-analysis", action='store_true', help="Skips participant analysis.")
+    parser.add_argument("--skip-group-analysis", action='store_true', help="Skips group analysis.")
+    parser.add_argument("--skip-participants", metavar="FILE", type=lambda x: is_valid_file(parser, x), help="Skips the participant labels listed in the text file.")
+    args = parser.parse_args()
+
+    nip_bids = NipBIDS(args.bids_app_boutiques_descriptor,
+                       args.bids_dataset,
+                       args.output_dir,
+                       {'skip_participant_analysis': args.skip_participant_analysis,
+                        'skip_group_analysis': args.skip_group_analysis,
+                        'skip_participants_file': args.skip_participants})
+
+    # Run!
+    nip_bids.run()
+
+# Execute program
+if __name__ == "__main__":
+    main()
diff --git a/sim/other_wf_examples/nipype/tests/test_all.py b/sim/other_wf_examples/nipype/tests/test_all.py
new file mode 100755
index 0000000..7bd2d9d
--- /dev/null
+++ b/sim/other_wf_examples/nipype/tests/test_all.py
@@ -0,0 +1,56 @@
+import os, pytest, random, subprocess, time
+from unittest import TestCase
+import boutiques
+
+class TestNip(TestCase):
+
+    ## UTILITY METHODS
+    def get_nip_dir(self):
+        return os.path.join(os.path.dirname(__file__), "..")
+
+    def get_demo_dir(self):
+        return os.path.join(os.path.dirname(__file__), "../../../tests/demo")
+
+    def get_json_descriptor(self):
+        return os.path.join(self.get_demo_dir(), "bids-app-example.json")
+
+    def run_nip_bids(self, checkOutputGroup=True, options=[], correctBrainSize="830532", output_name=None):
+        millitime = int(time.time()*1000)
+        if not output_name:
+            output_name = "output" + str(random.SystemRandom().randint(0, int(millitime)))
+        command = [os.path.join(self.get_nip_dir(),
+                                "nip_bids.py"),
+                   self.get_json_descriptor(),
+                   os.path.join(self.get_demo_dir(), "ds001"),
+                   output_name]
+        for option in options:
+            command.append(option)
+        try:
+            stdout_string = subprocess.check_output(command,
+                                                    stderr=subprocess.STDOUT)
+        except subprocess.CalledProcessError:
+            self.assertTrue(False, "Command-line execution failed {0}".format(str(command)))
+        self.assertTrue("ERROR" not in stdout_string)
+        if checkOutputGroup:
+            assert(os.path.isfile(os.path.join(output_name, "avg_brain_size.txt")))
+            with open(os.path.join(output_name, "avg_brain_size.txt")) as f:
+                output_content = f.read()
+            content = "Average brain size is {0} voxels".format(correctBrainSize)
+            self.assertTrue(output_content == content)
+
+    ## TESTS
+    def test_demo_descriptor_valid(self):
+        self.assertFalse(boutiques.validate(self.get_json_descriptor(), "-b"))
+
+    def test_nip_bids_no_option(self):
+        self.run_nip_bids()
+
+    def test_nip_bids_separate_analyses(self):
+        self.run_nip_bids(options=["--skip-group-analysis"], checkOutputGroup=False, output_name="output")  # just the participant analysis
+        self.run_nip_bids(options=["--skip-participant-analysis"], output_name="output")  # just the group analysis
+
+    def test_nip_bids_skip_participant(self):
+        participant_file = "skip.txt"
+        with open(participant_file, "w") as f:
+            f.write("01")
+        self.run_nip_bids(options=["--skip-participants", "skip.txt"], correctBrainSize="865472")
diff --git a/sim/tests/README.md b/sim/tests/README.md
new file mode 100644
index 0000000..c33fb17
--- /dev/null
+++ b/sim/tests/README.md
@@ -0,0 +1,10 @@
+# Tests
+
+To check coverage, install coveralls:
+```pip install coveralls```
+
+Run the tests through coverage:
+```coverage run --source sim setup.py test```
+
+Print the coverage report:
+```coverage report -m```
\ No newline at end of file
diff --git a/test/demo/bids-app-example.json b/sim/tests/demo/bids-app-example.json
similarity index 100%
rename from test/demo/bids-app-example.json
rename to sim/tests/demo/bids-app-example.json
diff --git a/test/demo/ds001/CHANGES b/sim/tests/demo/ds001/CHANGES
similarity index 100%
rename from test/demo/ds001/CHANGES
rename to sim/tests/demo/ds001/CHANGES
diff --git a/test/demo/ds001/README b/sim/tests/demo/ds001/README
similarity index 100%
rename from test/demo/ds001/README
rename to sim/tests/demo/ds001/README
diff --git a/test/demo/ds001/dataset_description.json b/sim/tests/demo/ds001/dataset_description.json
similarity index 100%
rename from test/demo/ds001/dataset_description.json
rename to sim/tests/demo/ds001/dataset_description.json
diff --git a/test/demo/ds001/participants.tsv b/sim/tests/demo/ds001/participants.tsv
similarity index 100%
rename from test/demo/ds001/participants.tsv
rename to sim/tests/demo/ds001/participants.tsv
diff --git a/test/demo/ds001/sub-01/anat/sub-01_T1w.nii.gz b/sim/tests/demo/ds001/sub-01/anat/sub-01_T1w.nii.gz
similarity index 100%
rename from test/demo/ds001/sub-01/anat/sub-01_T1w.nii.gz
rename to sim/tests/demo/ds001/sub-01/anat/sub-01_T1w.nii.gz
diff --git a/test/demo/ds001/sub-01/anat/sub-01_inplaneT2.nii.gz b/sim/tests/demo/ds001/sub-01/anat/sub-01_inplaneT2.nii.gz
similarity index 100%
rename from test/demo/ds001/sub-01/anat/sub-01_inplaneT2.nii.gz
rename to sim/tests/demo/ds001/sub-01/anat/sub-01_inplaneT2.nii.gz
diff --git a/test/demo/ds001/sub-02/anat/sub-02_T1w.nii.gz b/sim/tests/demo/ds001/sub-02/anat/sub-02_T1w.nii.gz
similarity index 100%
rename from test/demo/ds001/sub-02/anat/sub-02_T1w.nii.gz
rename to sim/tests/demo/ds001/sub-02/anat/sub-02_T1w.nii.gz
diff --git a/test/demo/ds001/sub-02/anat/sub-02_inplaneT2.nii.gz b/sim/tests/demo/ds001/sub-02/anat/sub-02_inplaneT2.nii.gz
similarity index 100%
rename from test/demo/ds001/sub-02/anat/sub-02_inplaneT2.nii.gz
rename to sim/tests/demo/ds001/sub-02/anat/sub-02_inplaneT2.nii.gz
diff --git a/test/demo/ds001/task-balloonanalogrisktask_bold.json b/sim/tests/demo/ds001/task-balloonanalogrisktask_bold.json
similarity index 100%
rename from test/demo/ds001/task-balloonanalogrisktask_bold.json
rename to sim/tests/demo/ds001/task-balloonanalogrisktask_bold.json
diff --git a/sim/tests/execute_pytest.sh b/sim/tests/execute_pytest.sh
new file mode 100644
index 0000000..535512b
--- /dev/null
+++ b/sim/tests/execute_pytest.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+# Fail the build as soon as one of the test runs fails
+set -e
+
+# install sim
+pip install -e ../
+
+# sparkBIDS tests
+pytest --cov=./ tests
+
+# nipBIDS tests
+pytest other_wf_examples/nipype/tests
diff --git a/test/test_all.py b/sim/tests/test_all.py
similarity index 82%
rename from test/test_all.py
rename to sim/tests/test_all.py
index 37ed71b..e9e2f23 100755
--- a/test/test_all.py
+++ b/sim/tests/test_all.py
@@ -1,14 +1,13 @@
 import os, pytest, random, subprocess, time
 from unittest import TestCase
-
-#def test_bids_validator():
-    # subprocess.call(["bids-validator","../demo/ds001"])
+import boutiques
 
 class TestSim(TestCase):
 
     ## UTILITY METHODS
+
     def get_sim_dir(self):
-        return os.path.join(os.path.dirname(__file__),"../sim")
+        return os.path.join(os.path.dirname(__file__),"..")
 
     def get_demo_dir(self):
         return os.path.join(os.path.dirname(__file__),"demo")
@@ -30,9 +29,9 @@ def run_spark_bids(self,checkOutputGroup=True,options=[],correctBrainSize="83053
         try:
             stdout_string = subprocess.check_output(command,
                                                     stderr=subprocess.STDOUT)
-        except:
+        except subprocess.CalledProcessError:
             self.assertTrue(False,"Command-line execution failed {0}".format(str(command)))
-        self.assertTrue("ERROR" not in stdout_string)
+        self.assertTrue(b"ERROR" not in stdout_string)
         if checkOutputGroup:
             assert(os.path.isfile(os.path.join(output_name,"avg_brain_size.txt")))
             with open(os.path.join(output_name,"avg_brain_size.txt")) as f:
@@ -42,9 +41,7 @@ def run_spark_bids(self,checkOutputGroup=True,options=[],correctBrainSize="83053
 
     ## TESTS
     def test_demo_descriptor_valid(self):
-        self.assertFalse(subprocess.call(["bosh-validate",
-                                          self.get_json_descriptor()
-                                          ,"-b"]))
+        self.assertFalse(boutiques.validate(self.get_json_descriptor(), "-b"))
 
     def test_spark_bids_no_option(self):
         self.run_spark_bids()
@@ -57,9 +54,10 @@ def test_spark_bids_separate_analyses(self):
     def test_spark_bids_skip_participant(self):
         participant_file = "skip.txt"
         with open(participant_file,"w") as f:
             f.write("01")
-        self.run_spark_bids(options=["--skip-participants","skip.txt"],correctBrainSize="865472")
-
+        self.run_spark_bids(options=["--skip-participants", "skip.txt"], correctBrainSize="865472")
+
     def test_spark_bids_hdfs(self):
+        pytest.importorskip("hdfs")
         self.run_spark_bids(options=["--hdfs"])
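
For reviewers who want to exercise the refactored classes outside the test suite, here is a minimal driver sketch. It is not part of this diff: it assumes the `spark_bids` entry point declared in setup.py wires things up roughly this way, and the paths and option values shown are illustrative (the demo descriptor and dataset are the ones moved to `sim/tests/demo` above).
```
# Minimal sketch of driving SparkBIDS directly; run from the sim/ directory
# so that SparkBIDS.py and Sim.py are importable.
from pyspark import SparkConf, SparkContext
from SparkBIDS import SparkBIDS

# Option keys taken from the "Includes:" comment in SparkBIDS.__init__;
# values here are illustrative defaults.
options = {'use_hdfs': False,
           'skip_participant_analysis': False,
           'skip_group_analysis': False,
           'skip_participants_file': None}  # or an open file listing labels to skip

spark_bids = SparkBIDS('tests/demo/bids-app-example.json',  # Boutiques descriptor
                       'tests/demo/ds001',                  # BIDS dataset
                       'output',                            # output directory name
                       options)

sc = SparkContext(conf=SparkConf().setAppName('sim'))

# Tars each subject into an RDD, runs the participant analyses in parallel,
# then runs the group analysis unless a participant analysis failed (see run()).
spark_bids.run(sc)
```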