diff --git a/Docker/Dockerfile b/Docker/Dockerfile
index a0d4231..3c27a04 100644
--- a/Docker/Dockerfile
+++ b/Docker/Dockerfile
@@ -1,6 +1,6 @@
 FROM perl:5.30-slim-stretch
 MAINTAINER Miguel Machado
-LABEL version="4.2.1-01"
+LABEL version="4.2.2-01"
 WORKDIR /NGStools/
@@ -46,6 +46,8 @@ ENV PATH="/NGStools/ncbi-blast-2.9.0+/bin:/NGStools/any2fasta:${PATH}"
 # --- mlst ----
 RUN git clone https://github.com/tseemann/mlst.git
 ENV PATH="/NGStools/mlst/bin:${PATH}"
+# Update Clostridium to Clostridioides
+RUN echo -e 'cdifficile\tClostridioides\tdifficile' >> /NGStools/mlst/db/scheme_species_map.tab
 # --- ReMatCh ---
 # TODO: to be used after converting INNUca to Python v3
@@ -60,7 +62,7 @@ ENV PATH="/NGStools/ReMatCh/ReMatCh/src/samtools-1.3.1/bin:/NGStools/ReMatCh/ReM
 # --- INNUca ---
 RUN git clone https://github.com/B-UMMI/INNUca.git && \
     pip install setuptools
-ENV PATH="/NGStools/INNUca/src/fastqc_v0.11.5:/NGStools/INNUca/src/pilon_v1.23:/NGStools/INNUca/src/SPAdes-3.13.0-Linux/bin:/NGStools/INNUca/src/Trimmomatic-0.38:/NGStools/INNUca:${PATH}"
+ENV PATH="/NGStools/INNUca/src/fastqc_v0.11.5:/NGStools/INNUca/src/pilon_v1.23:/NGStools/INNUca/src/SPAdes-3.14.0-Linux/bin:/NGStools/INNUca/src/Trimmomatic-0.38:/NGStools/INNUca:${PATH}"
 # fixing permissions for MLST update
 RUN chmod +x /NGStools/INNUca/Docker/update_mlst_db.sh && chmod o+wr /NGStools/mlst/scripts/ && chmod -R o+wr /NGStools/mlst/db/ && sed -i "s#OUTDIR=pubmlst#OUTDIR=/NGStools/mlst/scripts/pubmlst#1" /NGStools/mlst/scripts/mlst-download_pub_mlst
diff --git a/Docker/README.md b/Docker/README.md
index 6024771..8d3da4d 100644
--- a/Docker/README.md
+++ b/Docker/README.md
@@ -1,25 +1,29 @@
-INNUca.py - Docker
-===============
+[![dockeri.co](https://dockeri.co/image/ummidock/innuca)](https://hub.docker.com/r/ummidock/innuca)
+
+# INNUca.py - Docker
+
 INNUca - Reads Control and Assembly
 *INNUENDO quality control of reads, de novo assembly and contigs quality assessment, and possible contamination search*
+
+
 This is a Dockerfile for using INNUca, with all dependencies already installed.
 Within this container you can find:
 - Debian Stretch (9)
-- Perl v5.30
+- Perl v5.30.1
 - git v2.11.0
 - Python v2.7
 - Java-JDK v1.8.0_40 headless
 - [Blast+](https://blast.ncbi.nlm.nih.gov/Blast.cgi) v2.9.0
-- [mlst](https://github.com/tseemann/mlst) v2.18.0
+- [mlst](https://github.com/tseemann/mlst) v2.18.1
 - [ReMatCh](https://github.com/B-UMMI/ReMatCh) v4.1.0
 - [Kraken](https://ccb.jhu.edu/software/kraken/) v2.0.7
-- [INNUca](https://github.com/B-UMMI/INNUca) v4.2.1
+- [INNUca](https://github.com/B-UMMI/INNUca) v4.2.2
@@ -30,7 +34,7 @@
 Within [play-with-docker](http://labs.play-with-docker.com/) webpage click on **Start**. A session will open with a big counter on the upper left corner. Click on **+ add new instance** and a terminal-like instance should be generated on the right. On this terminal you can load this docker image as follows:
-`docker pull ummidock/innuca:4.2.1-01`
+`docker pull ummidock/innuca:4.2.2-01`
#### Build this docker on your local machine
@@ -38,15 +42,15 @@ For this, docker needs to be installed on your machine.
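Aside on the `scheme_species_map.tab` line added to the Dockerfile above: mlst resolves scheme names to genus/species through that tab-separated table, and the appended row renames the *C. difficile* genus to its current name *Clostridioides*. A minimal sketch of how the appended row looks to a consumer (the path is the one used in the Dockerfile; the parsing code is illustrative, not mlst's own):

```python
# Illustrative only: confirm the appended scheme -> species row inside the container.
map_path = '/NGStools/mlst/db/scheme_species_map.tab'

with open(map_path) as handle:
    for line in handle:
        fields = line.rstrip('\n').split('\t')
        if fields and fields[0] == 'cdifficile':
            print(fields)  # expected: ['cdifficile', 'Clostridioides', 'difficile']
```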
Instructions for this ca
##### Using DockerHub (automated build image)
-`docker pull ummidock/innuca:4.2.1-01`
+`docker pull ummidock/innuca:4.2.2-01`
##### Using GitHub (build docker image)
1) `git clone https://github.com/B-UMMI/INNUca.git`
-2) `docker build -t ummidock/innuca:4.2.1-01 ./INNUca/Docker/`
+2) `docker build -t ummidock/innuca:4.2.2-01 ./INNUca/Docker/`
### Run (using automated build image)
-    docker run --rm -u $(id -u):$(id -g) -it -v /local/folder/fastq_data:/data/ ummidock/innuca:4.2.1-01 INNUca.py --speciesExpected "Streptococcus agalactiae" --genomeSizeExpectedMb 2.1 --inputDirectory /data/ --outdir /data/innuca_output/ --threads 8 --maxNumberContigs 100
+    docker run --rm -u $(id -u):$(id -g) -it -v /local/folder/fastq_data:/data/ ummidock/innuca:4.2.2-01 INNUca.py --speciesExpected "Streptococcus agalactiae" --genomeSizeExpectedMb 2.1 --inputDirectory /data/ --outdir /data/innuca_output/ --threads 8 --maxNumberContigs 100
### udocker
@@ -54,13 +58,13 @@ For this, docker needs to be installed on your machine. Instructions for this ca
```bash
# Get Docker image
-udocker pull ummidock/innuca:4.2.1-01
+udocker pull ummidock/innuca:4.2.2-01
# Create container (only needed to be done once)
-udocker create --name=innuca_4-2-1_01 ummidock/innuca:4.2.1-01
+udocker create --name=innuca_4-2-2_01 ummidock/innuca:4.2.2-01
# Run INNUca
-udocker run --user $(id -u):$(id -g) -v /local/folder/fastq_data:/data/ innuca_4-2-1_01 INNUca.py --speciesExpected "Streptococcus agalactiae" --genomeSizeExpectedMb 2.1 --inputDirectory /data/ --outdir /data/innuca_output/ --threads 8 --maxNumberContigs 100
+udocker run --user $(id -u):$(id -g) -v /local/folder/fastq_data:/data/ innuca_4-2-2_01 INNUca.py --speciesExpected "Streptococcus agalactiae" --genomeSizeExpectedMb 2.1 --inputDirectory /data/ --outdir /data/innuca_output/ --threads 8 --maxNumberContigs 100
```
More examples on how to use **udocker** can be found on the **udocker** [GitHub page](https://github.com/indigo-dc/udocker)
diff --git a/INNUca.py b/INNUca.py
index 1d7a7b0..4a66f1b 100755
--- a/INNUca.py
+++ b/INNUca.py
@@ -11,7 +11,7 @@
 Copyright (C) 2018 Miguel Machado
-Last modified: November 25, 2019
+Last modified: February 05, 2020
 This program is free software: you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
@@ -91,7 +91,7 @@ def include_rematch_dependencies_path(do_not_use_provided_software):
 def main():
-    version = '4.2.1'
+    version = '4.2.2'
     args = utils.parseArguments(version)
     general_start_time = time.time()
@@ -138,12 +138,12 @@ def main():
     # Check programs
     programs_version_dictionary = {}
-    programs_version_dictionary['gunzip'] = ['--version', '>=', '1.6']
+    programs_version_dictionary['gunzip'] = {'required': ['--version', '>=', '1.6']}
     # Java check first, for the java-dependent checks next
     if not (args.skipFastQC and args.skipTrimmomatic and (args.skipPilon or args.skipSPAdes)):
         # programs_version_dictionary['java'] = ['-version', '>=', '1.8']
-        programs_version_dictionary['java'] = [None, '>=', '1.8']  # For OpenJDK compatibility
+        programs_version_dictionary['java'] = {'required': [None, '>=', '1.8']}  # For OpenJDK compatibility
     missingPrograms, programs_version_dictionary = utils.checkPrograms(programs_version_dictionary)
     if len(missingPrograms) > 0:
         sys.exit('\n' + 'Errors:' + '\n' + '\n'.join(missingPrograms))
@@ -154,35 +154,35 @@ def main():
         global version_kraken_global
         version_kraken_global = kraken_version()
         if version_kraken_global == 2:
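# Editor's aside (illustrative, not part of the patch): kraken_version() decides which Kraken
# binaries INNUca must find on PATH before the checks below run -- a single 'kraken2' executable
# for Kraken 2, or, for Kraken 1, the 'kraken' classifier plus its kraken-report companion.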
-            programs_version_dictionary['kraken2'] = ['--version', '>=', '2.0.6']
+            programs_version_dictionary['kraken2'] = {'required': ['--version', '>=', '2.0.6']}
         else:
-            programs_version_dictionary['kraken'] = ['--version', '>=', '0.10.6']
-            programs_version_dictionary['kraken-report'] = ['--version', '>=', '0.10.6']
+            programs_version_dictionary['kraken'] = {'required': ['--version', '>=', '0.10.6']}
+            programs_version_dictionary['kraken-report'] = {'required': ['--version', '>=', '0.10.6']}
     if not args.skipTrueCoverage and trueCoverage_config is not None:
         rematch_script = include_rematch_dependencies_path(args.doNotUseProvidedSoftware)
-        programs_version_dictionary['rematch.py'] = ['--version', '>=', '4.0.1']
-        programs_version_dictionary['bcftools'] = ['--version', '==', '1.3.1']
+        programs_version_dictionary['rematch.py'] = {'required': ['--version', '>=', '4.0.1']}
+        programs_version_dictionary['bcftools'] = {'required': ['--version', '==', '1.3.1']}
     if not (args.skipTrueCoverage and ((args.skipAssemblyMapping and args.skipPilon) or args.skipSPAdes)):
-        programs_version_dictionary['bowtie2'] = ['--version', '>=', '2.2.9']
-        programs_version_dictionary['samtools'] = ['--version', '==', '1.3.1']
+        programs_version_dictionary['bowtie2'] = {'required': ['--version', '>=', '2.2.9']}
+        programs_version_dictionary['samtools'] = {'required': ['--version', '==', '1.3.1']}
     if not args.skipFastQC:
-        programs_version_dictionary['fastqc'] = ['--version', '==', '0.11.5']
+        programs_version_dictionary['fastqc'] = {'required': ['--version', '==', '0.11.5']}
     if not args.skipTrimmomatic:
-        programs_version_dictionary['trimmomatic-{version}.jar'.format(version=args.trimVersion)] = ['-version', '==',
-                                                                                                     args.trimVersion]
+        programs_version_dictionary['trimmomatic-{version}.jar'.format(version=args.trimVersion)] = \
+            {'required': ['-version', '==', args.trimVersion]}
     if args.runPear:
-        programs_version_dictionary['pear'] = ['--version', '>=', '0.9.10']
+        programs_version_dictionary['pear'] = {'required': ['--version', '>=', '0.9.10']}
     if not args.skipSPAdes:
-        programs_version_dictionary['spades.py'] = ['--version', '>=', '3.9.0']
+        programs_version_dictionary['spades.py'] = {'required': ['--version', '>=', '3.9.0']}
     if not (args.skipPilon or args.skipSPAdes):
-        programs_version_dictionary['pilon-{version}.jar'.format(version=args.pilonVersion)] = ['--version', '==',
-                                                                                                args.pilonVersion]
+        programs_version_dictionary['pilon-{version}.jar'.format(version=args.pilonVersion)] = \
+            {'required': ['--version', '==', args.pilonVersion]}
     if not (args.skipMLST or args.skipSPAdes):
-        programs_version_dictionary['mlst'] = ['--version', '>=', '2.4']
+        programs_version_dictionary['mlst'] = {'required': ['--version', '>=', '2.4']}
     if args.runInsertSize and not args.skipSPAdes:
         if args.skipAssemblyMapping and args.skipPilon:
-            programs_version_dictionary['bowtie2'] = ['--version', '>=', '2.2.9']
-            programs_version_dictionary['samtools'] = ['--version', '==', '1.3.1']
+            programs_version_dictionary['bowtie2'] = {'required': ['--version', '>=', '2.2.9']}
+            programs_version_dictionary['samtools'] = {'required': ['--version', '==', '1.3.1']}
     # Set and print PATH variable
     utils.setPATHvariable(args, script_path)
@@ -195,11 +195,17 @@ def main():
     jar_path_trimmomatic = None
     if not args.skipTrimmomatic:
         jar_path_trimmomatic = \
-            programs_version_dictionary['trimmomatic-{version}.jar'.format(version=args.trimVersion)][3]
+            programs_version_dictionary['trimmomatic-{version}.jar'.format(version=args.trimVersion)]['found']['path']
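# Editor's aside (illustrative, not part of the patch): each programs_version_dictionary entry
# changed shape in this release. Before, the value was a flat list and checkPrograms() appended
# the discovered path at index [3]; now the requirement and the discovery results live under
# separate keys, e.g. (the concrete path and version here are hypothetical):
#
#     old: {'mlst': ['--version', '>=', '2.4', '/usr/local/bin/mlst']}
#     new: {'mlst': {'required': ['--version', '>=', '2.4'],
#                    'found': {'path': '/usr/local/bin/mlst', 'version': '2.18.1'}}}
#
# which is why the jar and SPAdes lookups here now read ['found']['path'] and
# ['found']['version'] instead of the positional [3].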
     jar_path_pilon = None
     if not args.skipPilon and not args.skipSPAdes:
-        jar_path_pilon = programs_version_dictionary['pilon-{version}.jar'.format(version=args.pilonVersion)][3]
+        jar_path_pilon = \
+            programs_version_dictionary['pilon-{version}.jar'.format(version=args.pilonVersion)]['found']['path']
+
+    # Get SPAdes version
+    spades_version = None
+    if not args.skipSPAdes:
+        spades_version = programs_version_dictionary['spades.py']['found']['version']
     # pairEnd_filesSeparation_list = args.pairEnd_filesSeparation
     pairEnd_filesSeparation_list = None
@@ -207,7 +213,7 @@
         get_samples(args.inputDirectory, args.fastq, outdir, pairEnd_filesSeparation_list)
     # Start running the analysis
-    print '\n' + 'RUNNING INNUca.py'
+    print('\n' + 'RUNNING INNUca.py')
     # Prepare run report file
     samples_report_path = os.path.join(outdir, 'samples_report.' + time_str + '.tab')
@@ -230,12 +236,12 @@
     # Determine SPAdes maximum memory
     spadesMaxMemory = None
     if not args.skipSPAdes:
-        print ''
+        print('')
         spadesMaxMemory = spades.define_memory(args.spadesMaxMemory, args.threads, available_memory_GB)
     # Determine .jar maximum memory
     jarMaxMemory = 'off'
     if not (args.skipTrimmomatic and (args.skipSPAdes or args.skipPilon)):
-        print ''
+        print('')
         jarMaxMemory = utils.define_jar_max_memory(args.jarMaxMemory, args.threads, available_memory_GB)
     # Run INNUca for each sample
@@ -243,7 +249,7 @@
     for sample in samples:
         sample_start_time = time.time()
-        print '\n' + 'Sample: ' + sample + '\n'
+        print('\n' + 'Sample: ' + sample + '\n')
         # Create sample outdir
         sample_outdir = os.path.abspath(os.path.join(outdir, sample, ''))
@@ -253,21 +259,21 @@
         # Get fastq files
         fastq_files = utils.searchFastqFiles(os.path.join(inputDirectory, sample, ''), pairEnd_filesSeparation_list,
                                              False)
         if len(fastq_files) == 1:
-            print 'Only one fastq file was found: ' + str(fastq_files)
-            print 'Pair-End sequencing is required. Moving to the next sample'
+            print('Only one fastq file was found: ' + str(fastq_files))
+            print('Pair-End sequencing is required. Moving to the next sample')
             continue
         elif len(fastq_files) == 0:
-            print 'No compressed fastq files were found. Continue to the next sample'
+            print('No compressed fastq files were found. Continue to the next sample')
             continue
-        print 'The following files will be used:'
-        print str(fastq_files) + '\n'
+        print('The following files will be used:')
+        print(str(fastq_files) + '\n')
         # Run INNUca.py analysis
         run_successfully, pass_qc, run_report = \
             run_innuca(sample, sample_outdir, fastq_files, args, script_path, scheme, spadesMaxMemory,
                        jar_path_trimmomatic, jar_path_pilon, jarMaxMemory, trueCoverage_config, rematch_script,
-                       species_genus, mlst_scheme_genus)
+                       species_genus, mlst_scheme_genus, spades_version=spades_version)
         # Save sample fail report
         utils.write_fail_report(os.path.join(sample_outdir, 'fail_report.txt'), run_report)
@@ -286,7 +292,7 @@
         if args.fastq is not None:
             utils.removeDirectory(os.path.join(outdir, 'reads', ''))
-        print 'END ' + sample + ' analysis'
+        print('END ' + sample + ' analysis')
         time_taken = utils.runTime(sample_start_time)
         # Save run report
@@ -389,7 +395,8 @@ def get_samples(args_input_directory, args_fastq, outdir, pair_end_files_separat
 def run_innuca(sample_name, outdir, fastq_files, args, script_path, scheme, spades_max_memory, jar_path_trimmomatic,
-               jar_path_pilon, jar_max_memory, true_coverage_config, rematch_script, species_genus, mlst_scheme_genus):
+               jar_path_pilon, jar_max_memory, true_coverage_config, rematch_script, species_genus, mlst_scheme_genus,
+               spades_version=None):
     threads = args.threads
     adapters_fasta = args.adapters
     if adapters_fasta is not None:
@@ -624,7 +631,9 @@ def run_innuca(sample_name, outdir, fastq_files, args, script_path, scheme, spad
                             args.spadesMinCoverageAssembly, args.spadesMinContigsLength, genome_size,
                             args.spadesKmers, max_reads_length, args.spadesDefaultKmers,
                             args.spadesMinKmerCovContigs, assembled_se_reads, args.saveExcludedContigs,
-                            args.maxNumberContigs, args.keepSPAdesScaffolds)
+                            args.maxNumberContigs, args.keepSPAdesScaffolds, spades_version=spades_version,
+                            estimated_coverage=estimated_coverage,
+                            spades_not_use_isolate=args.spadesNotUseIsolate)
         runs['SPAdes'] = [run_successfully, pass_qc, time_taken, failing, warning, 'NA']
         if run_successfully:
diff --git a/README.md b/README.md
index b393105..14a590d 100644
--- a/README.md
+++ b/README.md
@@ -79,7 +79,7 @@ usage: INNUca.py [-h] [--version] -s "Streptococcus agalactiae" -g 2.1
                  [--trimKeepFiles] [--doNotTrimCrops] [--trimCrop N]
                  [--trimHeadCrop N] [--trimSlidingWindow window:meanQuality]
                  [--trimLeading N] [--trimTrailing N] [--trimMinLength N]
-                 [--spadesVersion 3.13.0] [--spadesNotUseCareful]
+                 [--spadesVersion 3.14.0] [--spadesNotUseCareful] [--spadesNotUseIsolate]
                  [--spadesMinContigsLength N] [--spadesMaxMemory N]
                  [--spadesMinCoverageAssembly N] [--spadesMinKmerCovContigs N]
                  [--spadesKmers 55 77 [55 77 ...] | --spadesDefaultKmers]
@@ -295,12 +295,24 @@ Trimmomatic options:
                         length (default: 55) (default: 55)
 SPAdes options:
-  --spadesVersion 3.13.0
+  --spadesVersion 3.14.0
                         Tells INNUca.py which SPAdes version to use (available
-                        options: 3.10.1, 3.11.1, 3.13.0) (default: 3.13.0)
+                        options: 3.11.1, 3.13.0, 3.14.0) (default: 3.14.0)
   --spadesNotUseCareful
-                        Tells SPAdes to only perform the assembly without the
-                        --careful option (default: False)
+                        Tells SPAdes to perform the assembly without the --careful option.
+                        When the SPAdes --isolate option is allowed to be used (for SPAdes >= v3.14.0
+                        and when the INNUca --spadesNotUseIsolate option is not used) and the
+                        estimated depth of coverage is >= 100x, the SPAdes --careful option is not used
+                        anyway. (default: False)
+  --spadesNotUseIsolate
+                        Tells SPAdes not to use the --isolate option (only possible for SPAdes >= v3.14.0).
+                        The SPAdes --isolate option is used when the estimated depth of coverage
+                        is >= 100x (unless the INNUca --spadesNotUseIsolate option is used) and automatically
+                        turns on the INNUca --spadesNotUseCareful option, consequently not using
+                        the SPAdes --careful option.
+                        According to SPAdes, the --isolate option is highly recommended for
+                        high-coverage isolate and multi-cell data (improves the assembly quality and
+                        running time). (default: False)
   --spadesMinContigsLength N
                         Filter SPAdes contigs for length greater than or equal to
                         this value (default: maximum reads size or 200 bp)
diff --git a/modules/spades.py b/modules/spades.py
index 5c34b4d..dc95045 100644
--- a/modules/spades.py
+++ b/modules/spades.py
@@ -8,7 +8,7 @@
 # Run Spades
 def spades(spades_folder, threads, fastq_files, notUseCareful, maxMemory, minCoverageAssembly, kmers,
-           assembled_se_reads):
+           assembled_se_reads, spades_version=None, estimated_coverage=None, spades_not_use_isolate=False):
     contigs = os.path.join(spades_folder, 'contigs.fasta')
     command = ['spades.py', '', '--only-assembler', '--threads', str(threads), '--memory', str(maxMemory),
@@ -18,6 +18,11 @@ def spades(spades_folder, threads, fastq_files, notUseCareful, maxMemory, minCov
     if not notUseCareful:
         command[1] = '--careful'
+    if not spades_not_use_isolate and \
+            spades_version is not None and float('.'.join(spades_version.split('.')[:2])) >= 3.14 and \
+            estimated_coverage is not None and estimated_coverage >= 100:
+        command[1] = '--isolate'  # Replaces the --careful flag set above; --isolate also implies --only-assembler, so that option gains no extra effect.
+
     if len(kmers) > 0:
         kmers = ','.join(map(str, kmers))
         command[9] = str('-k ' + kmers)
@@ -55,12 +60,18 @@ def define_minContigsLength(maximumReadsLength, minContigsLength):
     return minimum_length
-def define_memory(maxMemory, threads, available_memory_GB):
+def define_memory(maxMemory, threads, available_memory_GB, spades_version=None):
     GB_per_thread = 2048 / 1024.0
+    if spades_version is not None and float('.'.join(spades_version.split('.')[:2])) >= 3.14:
+        GB_per_thread = 3072 / 1024.0
     minimum_required_memory_GB = GB_per_thread * threads
-    if minimum_required_memory_GB < 4:
-        minimum_required_memory_GB = 4
+    if spades_version is not None and float('.'.join(spades_version.split('.')[:2])) >= 3.14:
+        if minimum_required_memory_GB < 6:
+            minimum_required_memory_GB = 6
+    else:
+        if minimum_required_memory_GB < 4:
+            minimum_required_memory_GB = 4
     if available_memory_GB == 0:
         print('WARNING: it was not possible to determine the free available memory!')
@@ -255,7 +266,7 @@ def decide_filter_parameters(sequence_dict, minContigsLength, minCoverageContigs
 def run_spades(sample_name, outdir, threads, fastq_files, not_use_careful, max_memory, min_coverage_assembly,
                min_contigs_length, estimated_genome_size_mb, kmers, maximum_reads_length, default_kmers,
                min_coverage_contigs, assembled_se_reads, save_excluded_contigs, max_number_contigs,
-               keep_scaffolds=False):
+               keep_scaffolds=False, spades_version=None, estimated_coverage=None, spades_not_use_isolate=False):
     pass_qc = True
     failing = {'sample': False}
     warnings = {}
@@ -276,7 +287,9 @@ def run_spades(sample_name, outdir, threads, fastq_files, not_use_careful, max_m
     print('SPAdes will use the following k-mers: ' + str(kmers))
     run_successfully, contigs = spades(spades_folder, threads, fastq_files, not_use_careful, max_memory,
-                                       min_coverage_assembly, kmers, assembled_se_reads)
+                                       min_coverage_assembly, kmers, assembled_se_reads, spades_version=spades_version,
+                                       estimated_coverage=estimated_coverage,
+                                       spades_not_use_isolate=spades_not_use_isolate)
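# Editor's sketch (not part of the patch): how the new spades() flags resolve, assuming SPAdes
# 3.14.0 and neither --spadesNotUseCareful nor --spadesNotUseIsolate given:
#     estimated_coverage >= 100  ->  command[1] = '--isolate'   (replacing '--careful')
#     estimated_coverage <  100  ->  command[1] = '--careful'
# The version gate float('.'.join('3.14.0'.split('.')[:2])) == 3.14 compares major.minor as a
# float; that is safe for the whitelisted versions (3.11.1, 3.13.0, 3.14.0) but would misorder a
# hypothetical 3.9 against 3.14. Note also that define_memory() only applies the 3 GB-per-thread
# rate when it receives spades_version, while the define_memory() call shown earlier in INNUca.py
# still passes only three arguments in this diff.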
     if run_successfully:
         scaffolds = os.path.join(spades_folder, 'scaffolds.fasta')
diff --git a/modules/utils.py b/modules/utils.py
index 969b140..004ba9e 100644
--- a/modules/utils.py
+++ b/modules/utils.py
@@ -222,17 +222,30 @@
                                 required=False, default=55)
     spades_options = parser.add_argument_group(title='SPAdes options')
-    spades_options.add_argument('--spadesVersion', type=str, metavar='3.13.0',
+    spades_options.add_argument('--spadesVersion', type=str, metavar='3.14.0',
                                 help='Tells INNUca.py which SPAdes version to use (available options: %(choices)s)',
-                                choices=['3.10.1', '3.11.1', '3.13.0'], required=False, default='3.13.0')
+                                choices=['3.11.1', '3.13.0', '3.14.0'], required=False, default='3.14.0')
     spades_options.add_argument('--spadesNotUseCareful', action='store_true',
-                                help='Tells SPAdes to only perform the assembly without the --careful option')
+                                help='Tells SPAdes to perform the assembly without the --careful option.'
+                                     ' When the SPAdes --isolate option is allowed to be used (for SPAdes >= v3.14.0'
+                                     ' and when the INNUca --spadesNotUseIsolate option is not used) and the'
+                                     ' estimated depth of coverage is >= 100x, the SPAdes --careful option is not used'
+                                     ' anyway.')
+    spades_options.add_argument('--spadesNotUseIsolate', action='store_true',
+                                help='Tells SPAdes not to use the --isolate option (only possible for SPAdes >= v3.14.0).'
+                                     ' The SPAdes --isolate option is used when the estimated depth of coverage'
+                                     ' is >= 100x (unless the INNUca --spadesNotUseIsolate option is used) and'
+                                     ' automatically turns on the INNUca --spadesNotUseCareful option, consequently'
+                                     ' not using the SPAdes --careful option.'
+                                     ' According to SPAdes, the --isolate option is highly recommended for'
+                                     ' high-coverage isolate and multi-cell data (improves the assembly quality and'
+                                     ' running time).')
     spades_options.add_argument('--spadesMinContigsLength', type=int, metavar='N',
                                 help='Filter SPAdes contigs for length greater than or equal to this value (default:'
                                      ' maximum reads size or 200 bp)',
                                 required=False)
     spades_options.add_argument('--spadesMaxMemory', type=int, metavar='N',
-                                help='The maximum amount of RAM Gb for SPAdes to use (default: 2 Gb per thread will be'
+                                help='The maximum amount of RAM Gb for SPAdes to use (default: 3 Gb per thread will be'
                                      ' used up to the free available memory)',
                                 required=False)
     spades_options.add_argument('--spadesMinCoverageAssembly', type=spades_cov_cutoff, metavar='N',
@@ -481,20 +494,23 @@ def checkPrograms(programs_version_dictionary):
     which_program = ['which', '']
     listMissings = []
     for program in programs:
+        programs[program]['found'] = {'path': None, 'version': None}
         which_program[1] = program
         run_successfully, stdout, stderr = runCommandPopenCommunicate(which_program, False, None, False)
         if not run_successfully:
             listMissings.append(program + ' not found in PATH.')
         else:
-            print(stdout.splitlines()[0])
-            programs[program].append(stdout.splitlines()[0])
-            if programs[program][0] is None:
-                print(program + ' (impossible to determine programme version) found at: ' + stdout.splitlines()[0])
+            programs[program]['found']['path'] = stdout.splitlines()[0]
+            print(programs[program]['found']['path'])
+            if programs[program]['required'][0] is None:
+                print('{program} (impossible to determine program version) found at: {path}'.format(
+                    program=program, path=programs[program]['found']['path']))
             else:
                 if program.endswith('.jar'):
-                    check_version = ['java', '-jar', stdout.splitlines()[0], programs[program][0]]
+                    check_version = ['java', '-jar', programs[program]['found']['path'],
+                                     programs[program]['required'][0]]
                 else:
-                    check_version = [stdout.splitlines()[0], programs[program][0]]
+                    check_version = [programs[program]['found']['path'], programs[program]['required'][0]]
                 run_successfully, stdout, stderr = runCommandPopenCommunicate(check_version, False, None, False)
                 if stdout == '':
                     stdout = stderr
@@ -509,10 +525,11 @@ def checkPrograms(programs_version_dictionary):
                 replace_characters = ['"', 'v', 'V', '+']
                 for i in replace_characters:
                     version_line = version_line.replace(i, '')
+                programs[program]['found']['version'] = version_line
                 print(program + ' (' + version_line + ') found')
-                if programs[program][1] == '>=':
+                if programs[program]['required'][1] == '>=':
                     program_found_version = version_line.split('.')
-                    program_version_required = programs[program][2].split('.')
+                    program_version_required = programs[program]['required'][2].split('.')
                     if len(program_version_required) == 3:
                         if len(program_found_version) == 2:
                             program_found_version.append(0)
@@ -525,13 +542,13 @@
                             continue
                         else:
                             listMissings.append(
-                                'It is required ' + program + ' with version ' + programs[program][1] + ' ' +
-                                programs[program][2])
+                                'It is required ' + program + ' with version ' +
+                                programs[program]['required'][1] + ' ' + programs[program]['required'][2])
                 else:
-                    if version_line != programs[program][2]:
+                    if version_line != programs[program]['required'][2]:
                         listMissings.append(
-                            'It is required ' + program + ' with version ' + programs[program][1] + ' ' +
-                            programs[program][2])
+                            'It is required ' + program + ' with version ' +
+                            programs[program]['required'][1] + ' ' + programs[program]['required'][2])
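# Editor's aside (illustrative, not part of the patch): for '>=' requirements checkPrograms()
# pads the discovered version to major.minor.patch before comparing (a found '2.2' is treated
# as 2.2.0 against a required 2.2.9), whereas '==' requirements -- samtools 1.3.1, fastqc 0.11.5,
# the Trimmomatic and Pilon jars -- must match the reported version string exactly.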
     return listMissings, programs
@@ -690,6 +707,9 @@ def checkSetInputDirectory(inputDirectory, outdir, pairEnd_filesSeparation_list)
                 continue
             elif len(files) >= 1:
                 samples.append(directory)
+            else:
+                print('WARNING: no fastq files were found for sample {sample}!'
+                      ' This sample will be ignored. (a common problem is broken links)'.format(sample=directory))
     if len(samples) == 0:
         sys.exit('There are no fastq files in the sample folders provided! Make sure fastq files end with .fastq.gz'
                  ' or .fq.gz, and the pair-end information is either in _R1_001. or _1. format.')
diff --git a/src/SPAdes-3.10.1-Linux/bin/bwa-spades b/src/SPAdes-3.10.1-Linux/bin/bwa-spades
deleted file mode 100755
index 6923631..0000000
Binary files a/src/SPAdes-3.10.1-Linux/bin/bwa-spades and /dev/null differ
diff --git a/src/SPAdes-3.10.1-Linux/bin/corrector b/src/SPAdes-3.10.1-Linux/bin/corrector
deleted file mode 100755
index e1d2ef7..0000000
Binary files a/src/SPAdes-3.10.1-Linux/bin/corrector and /dev/null differ
diff --git a/src/SPAdes-3.10.1-Linux/bin/dipspades b/src/SPAdes-3.10.1-Linux/bin/dipspades
deleted file mode 100755
index 61d859f..0000000
Binary files a/src/SPAdes-3.10.1-Linux/bin/dipspades and /dev/null differ
diff --git a/src/SPAdes-3.10.1-Linux/bin/dipspades.py b/src/SPAdes-3.10.1-Linux/bin/dipspades.py
deleted file mode 100755
index 8fa2047..0000000
--- a/src/SPAdes-3.10.1-Linux/bin/dipspades.py
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/usr/bin/env python
-
-############################################################################
-# Copyright (c) 2015 Saint Petersburg State University
-# Copyright (c) 2011-2014 Saint Petersburg Academic University
-# All Rights Reserved
-# See file LICENSE for details.
-############################################################################ - -import os -import sys -import getopt -from os.path import abspath, expanduser - -import spades -import support -import options_storage -import dipspades_logic -import spades_init -spades_init.init() -spades_version = spades_init.spades_version - - -def main(): - all_long_options = list(set(options_storage.long_options + dipspades_logic.DS_Args_List.long_options)) - all_short_options = options_storage.short_options + dipspades_logic.DS_Args_List.short_options - - dipspades_logic_args = [] - spades_py_args = ["--diploid"] - - try: - options, not_options = getopt.gnu_getopt(sys.argv, all_short_options, all_long_options) - except getopt.GetoptError: - _, exc, _ = sys.exc_info() - sys.stderr.write(str(exc) + "\n") - options_storage.usage(spades_version, mode="dip") - sys.stderr.flush() - sys.exit(1) - if not options: - options_storage.usage(spades_version, mode="dip") - sys.stderr.flush() - sys.exit(1) - - output_dir = None - spades_py_run_needed = False - for opt, arg in options: - # processing some special options - if opt == '--test': - output_dir = abspath("test_dipspades") - spades_py_args = ["--diploid", "-1", os.path.join(spades_init.spades_home, "test_dataset/ecoli_1K_1.fq.gz"), - "-2", os.path.join(spades_init.spades_home, "test_dataset/ecoli_1K_2.fq.gz"), "--only-assembler"] - dipspades_logic_args = [] - spades_py_run_needed = True - break - if opt == '-o': - output_dir = abspath(expanduser(arg)) - elif opt == '--careful' or opt == '--mismatch-correction': - continue - if opt == '-v' or opt == '--version': - options_storage.version(spades_version, mode="dip") - sys.exit(0) - if opt == '-h' or opt == '--help': - options_storage.usage(spades_version, mode="dip") - sys.exit(0) - elif opt == "--help-hidden": - options_storage.usage(spades_version, show_hidden=True, mode="dip") - sys.exit(0) - # for all other options - cur_opt_arg = [opt] - if arg: - cur_opt_arg.append(arg) - if opt.startswith("--"): # long option - if opt[2:] in options_storage.long_options or (opt[2:] + "=") in options_storage.long_options: - spades_py_args += cur_opt_arg - if opt[2:] in dipspades_logic.DS_Args_List.long_options or (opt[2:] + "=") in dipspades_logic.DS_Args_List.long_options: - dipspades_logic_args += cur_opt_arg - else: - spades_py_run_needed = True - else: - dipspades_logic_args += cur_opt_arg - else: # short option - if opt != '-o': - if opt[1:] in options_storage.short_options: - spades_py_args += cur_opt_arg - if opt[1:] in dipspades_logic.DS_Args_List.short_options: - dipspades_logic_args += cur_opt_arg - else: - spades_py_run_needed = True - else: - dipspades_logic_args += cur_opt_arg - - if not output_dir: - support.error("The output_dir is not set! It is a mandatory parameter (-o output_dir).", dipspades=True) - - spades_output_dir = os.path.join(output_dir, "spades") - dipspades_output_dir = os.path.join(output_dir, "dipspades") - - if not os.path.isdir(output_dir): - os.makedirs(output_dir) - if not os.path.isdir(spades_output_dir): - os.makedirs(spades_output_dir) - if not os.path.isdir(dipspades_output_dir): - os.makedirs(dipspades_output_dir) - - spades_result = "" - if spades_py_run_needed: - spades_py_args += ["-o", spades_output_dir] - spades.main(spades_py_args) - spades_result = os.path.join(spades_output_dir, "contigs.fasta") - if not os.path.isfile(spades_result): - support.error("Something went wrong and SPAdes did not generate haplocontigs. 
" - "DipSPAdes cannot proceed without them, aborting.", dipspades=True) - - dipspades_logic_args += ["-o", dipspades_output_dir] - if spades_result != "": - dipspades_logic_args += ["--hap", spades_result] - dipspades_logic.main(dipspades_logic_args, sys.argv, spades.spades_home, spades.bin_home) - - -if __name__ == '__main__': - main() diff --git a/src/SPAdes-3.10.1-Linux/bin/hammer b/src/SPAdes-3.10.1-Linux/bin/hammer deleted file mode 100755 index ab81b02..0000000 Binary files a/src/SPAdes-3.10.1-Linux/bin/hammer and /dev/null differ diff --git a/src/SPAdes-3.10.1-Linux/bin/ionhammer b/src/SPAdes-3.10.1-Linux/bin/ionhammer deleted file mode 100755 index 4e3aece..0000000 Binary files a/src/SPAdes-3.10.1-Linux/bin/ionhammer and /dev/null differ diff --git a/src/SPAdes-3.10.1-Linux/bin/scaffold_correction b/src/SPAdes-3.10.1-Linux/bin/scaffold_correction deleted file mode 100755 index 64cd4a8..0000000 Binary files a/src/SPAdes-3.10.1-Linux/bin/scaffold_correction and /dev/null differ diff --git a/src/SPAdes-3.10.1-Linux/bin/spades b/src/SPAdes-3.10.1-Linux/bin/spades deleted file mode 100755 index 520b246..0000000 Binary files a/src/SPAdes-3.10.1-Linux/bin/spades and /dev/null differ diff --git a/src/SPAdes-3.10.1-Linux/bin/spades.py b/src/SPAdes-3.10.1-Linux/bin/spades.py deleted file mode 100755 index ff31c92..0000000 --- a/src/SPAdes-3.10.1-Linux/bin/spades.py +++ /dev/null @@ -1,1002 +0,0 @@ -#!/usr/bin/env python - -############################################################################ -# Copyright (c) 2015 Saint Petersburg State University -# Copyright (c) 2011-2014 Saint Petersburg Academic University -# All Rights Reserved -# See file LICENSE for details. -############################################################################ - -import os -import shutil -from site import addsitedir -from distutils import dir_util -from os.path import abspath, expanduser -import sys -import getopt -import logging -import platform -import errno - -import spades_init -spades_init.init() -spades_home = spades_init.spades_home -bin_home = spades_init.bin_home -python_modules_home = spades_init.python_modules_home -ext_python_modules_home = spades_init.ext_python_modules_home -spades_version = spades_init.spades_version - -import support -support.check_python_version() - -from process_cfg import merge_configs, empty_config, load_config_from_file -import hammer_logic -import spades_logic -import options_storage -addsitedir(ext_python_modules_home) -if sys.version.startswith('2.'): - import pyyaml2 as pyyaml -elif sys.version.startswith('3.'): - import pyyaml3 as pyyaml - -import moleculo_postprocessing -import alignment - - -def print_used_values(cfg, log): - def print_value(cfg, section, param, pretty_param="", margin=" "): - if not pretty_param: - pretty_param = param.capitalize().replace('_', ' ') - line = margin + pretty_param - if param in cfg[section].__dict__: - line += ": " + str(cfg[section].__dict__[param]) - else: - if param.find("offset") != -1: - line += " will be auto-detected" - log.info(line) - - log.info("") - - # system info - log.info("System information:") - try: - log.info(" SPAdes version: " + str(spades_version).strip()) - log.info(" Python version: " + ".".join(map(str, sys.version_info[0:3]))) - # for more details: '[' + str(sys.version_info) + ']' - log.info(" OS: " + platform.platform()) - # for more details: '[' + str(platform.uname()) + ']' - except Exception: - log.info(" Problem occurred when getting system information") - log.info("") - - # main - 
print_value(cfg, "common", "output_dir", "", "") - if ("error_correction" in cfg) and (not "assembly" in cfg): - log.info("Mode: ONLY read error correction (without assembling)") - elif (not "error_correction" in cfg) and ("assembly" in cfg): - log.info("Mode: ONLY assembling (without read error correction)") - else: - log.info("Mode: read error correction and assembling") - if ("common" in cfg) and ("developer_mode" in cfg["common"].__dict__): - if cfg["common"].developer_mode: - log.info("Debug mode is turned ON") - else: - log.info("Debug mode is turned OFF") - log.info("") - - # dataset - if "dataset" in cfg: - log.info("Dataset parameters:") - - if options_storage.iontorrent: - log.info(" IonTorrent data") - - if options_storage.meta: - log.info(" Metagenomic mode") - elif options_storage.large_genome: - log.info(" Large genome mode") - elif options_storage.truseq_mode: - log.info(" Illumina TruSeq mode") - elif options_storage.rna: - log.info(" RNA-seq mode") - elif options_storage.single_cell: - log.info(" Single-cell mode") - else: - log.info(" Multi-cell mode (you should set '--sc' flag if input data"\ - " was obtained with MDA (single-cell) technology"\ - " or --meta flag if processing metagenomic dataset)") - - log.info(" Reads:") - dataset_data = pyyaml.load(open(cfg["dataset"].yaml_filename, 'r')) - dataset_data = support.relative2abs_paths(dataset_data, os.path.dirname(cfg["dataset"].yaml_filename)) - support.pretty_print_reads(dataset_data, log) - - # error correction - if "error_correction" in cfg: - log.info("Read error correction parameters:") - print_value(cfg, "error_correction", "max_iterations", "Iterations") - print_value(cfg, "error_correction", "qvoffset", "PHRED offset") - - if cfg["error_correction"].gzip_output: - log.info(" Corrected reads will be compressed (with gzip)") - else: - log.info(" Corrected reads will NOT be compressed (with gzip)") - - # assembly - if "assembly" in cfg: - log.info("Assembly parameters:") - if options_storage.auto_K_allowed(): - log.info(" k: automatic selection based on read length") - else: - print_value(cfg, "assembly", "iterative_K", "k") - if options_storage.plasmid: - log.info(" Plasmid mode is turned ON") - if cfg["assembly"].disable_rr: - log.info(" Repeat resolution is DISABLED") - else: - log.info(" Repeat resolution is enabled") - if options_storage.careful: - log.info(" Mismatch careful mode is turned ON") - else: - log.info(" Mismatch careful mode is turned OFF") - if "mismatch_corrector" in cfg: - log.info(" MismatchCorrector will be used") - else: - log.info(" MismatchCorrector will be SKIPPED") - if cfg["assembly"].cov_cutoff == 'off': - log.info(" Coverage cutoff is turned OFF") - elif cfg["assembly"].cov_cutoff == 'auto': - log.info(" Coverage cutoff is turned ON and threshold will be auto-detected") - else: - log.info(" Coverage cutoff is turned ON and threshold is " + str(cfg["assembly"].cov_cutoff)) - - log.info("Other parameters:") - print_value(cfg, "common", "tmp_dir", "Dir for temp files") - print_value(cfg, "common", "max_threads", "Threads") - print_value(cfg, "common", "max_memory", "Memory limit (in Gb)", " ") - log.info("") - - -def fill_cfg(options_to_parse, log, secondary_filling=False): - skip_output_dir=secondary_filling - skip_stop_after = secondary_filling - load_processed_dataset=secondary_filling - - try: - options, not_options = getopt.gnu_getopt(options_to_parse, options_storage.short_options, options_storage.long_options) - except getopt.GetoptError: - _, exc, _ = sys.exc_info() - 
sys.stderr.write(str(exc) + "\n") - sys.stderr.flush() - show_usage(1) - - if not options: - show_usage(1) - - if len(not_options) > 1: - for opt, arg in options: - if opt == "-k" and arg.strip().endswith(','): - support.error("Do not put spaces after commas in the list of k-mers sizes! Correct example: -k 21,33,55", log) - support.error("Please specify option (e.g. -1, -2, -s, etc) for the following paths: " + ", ".join(not_options[1:]) + "\n", log) - - # all parameters are stored here - cfg = dict() - # dataset is stored here. We are prepared for up to MAX_LIBS_NUMBER for each type of short-reads libs - dataset_data = [{} for i in range(options_storage.MAX_LIBS_NUMBER * - len(options_storage.SHORT_READS_TYPES.keys()) + - len(options_storage.LONG_READS_TYPES))] # "[{}]*num" doesn't work here! - - # auto detecting SPAdes mode (rna, meta, etc) if it is not a rerun (--continue or --restart-from) - if secondary_filling or not options_storage.will_rerun(options): - mode = options_storage.get_mode() - if mode is not None: - options.append(('--' + mode, '')) - - # for parsing options from "previous run command" - options_storage.continue_mode = False - options_storage.k_mers = None - for opt, arg in options: - if opt == '-o': - if not skip_output_dir: - if options_storage.output_dir is not None: - support.error('-o option was specified at least twice') - options_storage.output_dir = abspath(expanduser(arg)) - options_storage.dict_of_rel2abs[arg] = options_storage.output_dir - support.check_path_is_ascii(options_storage.output_dir, 'output directory') - elif opt == "--tmp-dir": - options_storage.tmp_dir = abspath(expanduser(arg)) - options_storage.dict_of_rel2abs[arg] = options_storage.tmp_dir - support.check_path_is_ascii(options_storage.tmp_dir, 'directory for temporary files') - elif opt == "--configs-dir": - options_storage.configs_dir = support.check_dir_existence(arg) - elif opt == "--reference": - options_storage.reference = support.check_file_existence(arg, 'reference', log) - elif opt == "--series-analysis": - options_storage.series_analysis = support.check_file_existence(arg, 'series-analysis', log) - elif opt == "--dataset": - options_storage.dataset_yaml_filename = support.check_file_existence(arg, 'dataset', log) - - elif opt in options_storage.reads_options: - support.add_to_dataset(opt, arg, dataset_data) - - elif opt == '-k': - if arg == 'auto': - options_storage.k_mers = arg - else: - options_storage.k_mers = list(map(int, arg.split(","))) - for k in options_storage.k_mers: - if k < options_storage.MIN_K or k > options_storage.MAX_K: - support.error('wrong k value ' + str(k) + ': all k values should be between %d and %d' % - (options_storage.MIN_K, options_storage.MAX_K), log) - if k % 2 == 0: - support.error('wrong k value ' + str(k) + ': all k values should be odd', log) - - elif opt == "--sc": - options_storage.single_cell = True - elif opt == "--meta": - options_storage.meta = True - elif opt == "--large-genome": - options_storage.large_genome = True - elif opt == "--plasmid": - options_storage.plasmid = True - elif opt == "--rna": - options_storage.rna = True - elif opt == "--iontorrent": - options_storage.iontorrent = True - elif opt == "--disable-gzip-output": - options_storage.disable_gzip_output = True - elif opt == "--disable-gzip-output:false": - options_storage.disable_gzip_output = False - elif opt == "--disable-rr": - options_storage.disable_rr = True - elif opt == "--disable-rr:false": - options_storage.disable_rr = False - - elif opt == "--only-error-correction": 
- if options_storage.only_assembler: - support.error('you cannot specify --only-error-correction and --only-assembler simultaneously') - options_storage.only_error_correction = True - elif opt == "--only-assembler": - if options_storage.only_error_correction: - support.error('you cannot specify --only-error-correction and --only-assembler simultaneously') - options_storage.only_assembler = True - - elif opt == "--read-buffer-size": - options_storage.read_buffer_size = int(arg) - elif opt == "--bh-heap-check": - options_storage.bh_heap_check = arg - elif opt == "--spades-heap-check": - options_storage.spades_heap_check = arg - - elif opt == "--continue": - options_storage.continue_mode = True - elif opt == "--restart-from": - if arg not in ['ec', 'as', 'mc', 'scc', 'tpp'] and not arg.startswith('k'): - support.error("wrong value for --restart-from option: " + arg + - " (should be 'ec', 'as', 'k', or 'mc'", log) - options_storage.continue_mode = True - options_storage.restart_from = arg - elif opt == "--stop-after": - if not skip_stop_after: - if arg not in ['ec', 'as', 'mc', 'scc', 'tpp'] and not arg.startswith('k'): - support.error("wrong value for --stop-after option: " + arg + - " (should be 'ec', 'as', 'k', or 'mc'", log) - options_storage.stop_after = arg - - elif opt == '-t' or opt == "--threads": - options_storage.threads = int(arg) - elif opt == '-m' or opt == "--memory": - options_storage.memory = int(arg) - elif opt == "--phred-offset": - if arg == 'auto': - options_storage.qvoffset = arg - elif arg in ['33', '64']: - options_storage.qvoffset = int(arg) - else: - support.error('wrong PHRED quality offset value: ' + arg + - ' (should be either 33, 64, or \'auto\')', log) - elif opt == "--cov-cutoff": - if arg == 'auto' or arg == 'off': - options_storage.cov_cutoff = arg - elif support.is_float(arg) and float(arg) > 0.0: - options_storage.cov_cutoff = float(arg) - else: - support.error('wrong value for --cov-cutoff option: ' + arg + - ' (should be a positive float number, or \'auto\', or \'off\')', log) - elif opt == '-i' or opt == "--iterations": - options_storage.iterations = int(arg) - - elif opt == "--debug": - options_storage.developer_mode = True - elif opt == "--debug:false": - options_storage.developer_mode = False - - #corrector - elif opt == "--mismatch-correction": - options_storage.mismatch_corrector = True - elif opt == "--mismatch-correction:false": - options_storage.mismatch_corrector = False - - elif opt == "--careful": - options_storage.mismatch_corrector = True - options_storage.careful = True - elif opt == "--careful:false": - options_storage.mismatch_corrector = False - options_storage.careful = False - - elif opt == '-v' or opt == "--version": - show_version() - elif opt == '-h' or opt == "--help": - show_usage(0) - elif opt == "--help-hidden": - show_usage(0, show_hidden=True) - - elif opt == "--test": - options_storage.set_test_options() - #break - elif opt == "--diploid": - options_storage.diploid_mode = True - elif opt == "--truseq": - options_storage.enable_truseq_mode() - else: - raise ValueError - - if options_storage.test_mode: - if options_storage.plasmid: - support.add_to_dataset('-1', os.path.join(spades_home, "test_dataset_plasmid/pl1.fq.gz"), dataset_data) - support.add_to_dataset('-2', os.path.join(spades_home, "test_dataset_plasmid/pl2.fq.gz"), dataset_data) - else: - support.add_to_dataset('-1', os.path.join(spades_home, "test_dataset/ecoli_1K_1.fq.gz"), dataset_data) - support.add_to_dataset('-2', os.path.join(spades_home, 
"test_dataset/ecoli_1K_2.fq.gz"), dataset_data) - - if not options_storage.output_dir: - support.error("the output_dir is not set! It is a mandatory parameter (-o output_dir).", log) - if not os.path.isdir(options_storage.output_dir): - if options_storage.continue_mode: - support.error("the output_dir should exist for --continue and for --restart-from!", log) - os.makedirs(options_storage.output_dir) - if options_storage.restart_from: - if options_storage.continue_mode: # saving parameters specified with --restart-from - if not support.dataset_is_empty(dataset_data): - support.error("you cannot specify reads with --restart-from option!", log) - options_storage.save_restart_options(log) - else: # overriding previous run parameters - options_storage.load_restart_options() - if options_storage.meta: - if options_storage.careful or options_storage.mismatch_corrector or options_storage.cov_cutoff != "off": - support.error("you cannot specify --careful, --mismatch-correction or --cov-cutoff in metagenomic mode!", log) - if options_storage.rna: - if options_storage.careful: - support.error("you cannot specify --careful in RNA-Seq mode!", log) - if options_storage.k_mers and options_storage.k_mers != 'auto' and len(options_storage.k_mers) > 1: - support.error("you cannot specify multiple k-mer sizes in RNA-Seq mode!", log) - if options_storage.continue_mode: - return None, None - - existing_dataset_data = None - processed_dataset_fpath = os.path.join(options_storage.output_dir, "input_dataset.yaml") - if load_processed_dataset: - if os.path.isfile(processed_dataset_fpath): - try: - existing_dataset_data = pyyaml.load(open(processed_dataset_fpath, 'r')) - except pyyaml.YAMLError: - existing_dataset_data = None - if existing_dataset_data is not None: - dataset_data = existing_dataset_data - else: - if options_storage.dataset_yaml_filename: - try: - dataset_data = pyyaml.load(open(options_storage.dataset_yaml_filename, 'r')) - except pyyaml.YAMLError: - _, exc, _ = sys.exc_info() - support.error('exception caught while parsing YAML file (' + options_storage.dataset_yaml_filename + '):\n' + str(exc)) - dataset_data = support.relative2abs_paths(dataset_data, os.path.dirname(options_storage.dataset_yaml_filename)) - else: - dataset_data = support.correct_dataset(dataset_data) - dataset_data = support.relative2abs_paths(dataset_data, os.getcwd()) - options_storage.dataset_yaml_filename = processed_dataset_fpath - - support.check_dataset_reads(dataset_data, options_storage.only_assembler, log) - if not support.get_lib_ids_by_type(dataset_data, spades_logic.READS_TYPES_USED_IN_CONSTRUCTION): - support.error('you should specify at least one unpaired, paired-end, or high-quality mate-pairs library!') - if options_storage.rna: - if len(dataset_data) != len(support.get_lib_ids_by_type(dataset_data, spades_logic.READS_TYPES_USED_IN_RNA_SEQ)): - support.error('you cannot specify any data types except ' + - ', '.join(spades_logic.READS_TYPES_USED_IN_RNA_SEQ) + ' in RNA-Seq mode!') - if len(support.get_lib_ids_by_type(dataset_data, 'paired-end')) > 1: - support.error('you cannot specify more than one paired-end library in RNA-Seq mode!') - - if existing_dataset_data is None: - pyyaml.dump(dataset_data, open(options_storage.dataset_yaml_filename, 'w')) - - options_storage.set_default_values() - ### FILLING cfg - cfg["common"] = empty_config() - cfg["dataset"] = empty_config() - if not options_storage.only_assembler: - cfg["error_correction"] = empty_config() - if not options_storage.only_error_correction: - 
cfg["assembly"] = empty_config() - - # common - cfg["common"].__dict__["output_dir"] = options_storage.output_dir - cfg["common"].__dict__["tmp_dir"] = options_storage.tmp_dir - cfg["common"].__dict__["max_threads"] = options_storage.threads - cfg["common"].__dict__["max_memory"] = options_storage.memory - cfg["common"].__dict__["developer_mode"] = options_storage.developer_mode - if options_storage.series_analysis: - cfg["common"].__dict__["series_analysis"] = options_storage.series_analysis - - # dataset section - cfg["dataset"].__dict__["yaml_filename"] = options_storage.dataset_yaml_filename - if options_storage.developer_mode and options_storage.reference: - cfg["dataset"].__dict__["reference"] = options_storage.reference - - # error correction - if (not options_storage.only_assembler) and (options_storage.iterations > 0): - cfg["error_correction"].__dict__["output_dir"] = os.path.join(cfg["common"].output_dir, "corrected") - cfg["error_correction"].__dict__["max_iterations"] = options_storage.iterations - cfg["error_correction"].__dict__["gzip_output"] = not options_storage.disable_gzip_output - if options_storage.qvoffset: - cfg["error_correction"].__dict__["qvoffset"] = options_storage.qvoffset - if options_storage.bh_heap_check: - cfg["error_correction"].__dict__["heap_check"] = options_storage.bh_heap_check - cfg["error_correction"].__dict__["iontorrent"] = options_storage.iontorrent - if options_storage.meta or options_storage.large_genome: - cfg["error_correction"].__dict__["count_filter_singletons"] = 1 - if options_storage.read_buffer_size: - cfg["error_correction"].__dict__["read_buffer_size"] = options_storage.read_buffer_size - - # assembly - if not options_storage.only_error_correction: - if options_storage.k_mers == 'auto' and options_storage.restart_from is None: - options_storage.k_mers = None - if options_storage.k_mers: - cfg["assembly"].__dict__["iterative_K"] = options_storage.k_mers - elif options_storage.rna: - cfg["assembly"].__dict__["iterative_K"] = options_storage.K_MERS_RNA - else: - cfg["assembly"].__dict__["iterative_K"] = options_storage.K_MERS_SHORT - cfg["assembly"].__dict__["disable_rr"] = options_storage.disable_rr - cfg["assembly"].__dict__["diploid_mode"] = options_storage.diploid_mode - cfg["assembly"].__dict__["cov_cutoff"] = options_storage.cov_cutoff - if options_storage.spades_heap_check: - cfg["assembly"].__dict__["heap_check"] = options_storage.spades_heap_check - if options_storage.read_buffer_size: - cfg["assembly"].__dict__["read_buffer_size"] = options_storage.read_buffer_size - cfg["assembly"].__dict__["correct_scaffolds"] = options_storage.correct_scaffolds - - #corrector can work only if contigs exist (not only error correction) - if (not options_storage.only_error_correction) and options_storage.mismatch_corrector: - cfg["mismatch_corrector"] = empty_config() - cfg["mismatch_corrector"].__dict__["skip-masked"] = None - cfg["mismatch_corrector"].__dict__["bwa"] = os.path.join(bin_home, "bwa-spades") - cfg["mismatch_corrector"].__dict__["threads"] = options_storage.threads - cfg["mismatch_corrector"].__dict__["output-dir"] = options_storage.output_dir - cfg["run_truseq_postprocessing"] = options_storage.run_truseq_postprocessing - return cfg, dataset_data - -def check_cfg_for_partial_run(cfg, type='restart-from'): # restart-from ot stop-after - if type == 'restart-from': - check_point = options_storage.restart_from - action = 'restart from' - verb = 'was' - elif type == 'stop-after': - check_point = options_storage.stop_after - action = 
'stop after'
-        verb = 'is'
-    else:
-        return
-
-    if check_point == 'ec' and ("error_correction" not in cfg):
-        support.error("failed to " + action + " 'read error correction' ('" + check_point + "') because this stage " + verb + " not specified!")
-    if check_point == 'mc' and ("mismatch_corrector" not in cfg):
-        support.error("failed to " + action + " 'mismatch correction' ('" + check_point + "') because this stage " + verb + " not specified!")
-    if check_point == 'as' or check_point.startswith('k'):
-        if "assembly" not in cfg:
-            support.error("failed to " + action + " 'assembling' ('" + check_point + "') because this stage " + verb + " not specified!")
-        if check_point.startswith('k'):
-            correct_k = False
-            k_to_check = options_storage.k_mers
-            if not k_to_check:
-                if options_storage.auto_K_allowed():
-                    k_to_check = list(set(options_storage.K_MERS_SHORT + options_storage.K_MERS_150 + options_storage.K_MERS_250))
-                else:
-                    k_to_check = options_storage.K_MERS_SHORT
-            for k in k_to_check:
-                if check_point == ("k%d" % k) or check_point.startswith("k%d:" % k):
-                    correct_k = True
-                    break
-            if not correct_k:
-                k_str = check_point[1:]
-                if k_str.find(":") != -1:
-                    k_str = k_str[:k_str.find(":")]
-                support.error("failed to " + action + " K=%s because this K " % k_str + verb + " not specified!")
-
-
-def get_options_from_params(params_filename, running_script):
-    cmd_line = None
-    options = None
-    if not os.path.isfile(params_filename):
-        return cmd_line, options, "failed to parse command line of the previous run (%s not found)!" % params_filename
-    params = open(params_filename, 'r')
-    cmd_line = params.readline().strip()
-    spades_prev_version = None
-    for line in params:
-        if line.find('SPAdes version:') != -1:
-            spades_prev_version = line.split('SPAdes version:')[1]
-            break
-    params.close()
-    if spades_prev_version is None:
-        return cmd_line, options, "failed to parse SPAdes version of the previous run!"
-    if spades_prev_version.strip() != spades_version.strip():
-        return cmd_line, options, "SPAdes version of the previous run (%s) is not equal " \
-                                  "to the current version of SPAdes (%s)!" \
-                                  % (spades_prev_version.strip(), spades_version.strip())
-    if 'Command line: ' not in cmd_line or '\t' not in cmd_line:
-        return cmd_line, options, "failed to parse executable script of the previous run!"
-    options = cmd_line.split('\t')[1:]
-    prev_running_script = cmd_line.split('\t')[0][len('Command line: '):]
-    # we cannot restart/continue spades.py run with metaspades.py/rnaspades.py/etc and vice versa
-    if os.path.basename(prev_running_script) != os.path.basename(running_script):
-        return cmd_line, options, "executable script of the previous run (%s) is not equal " \
-                                  "to the current executable script (%s)!" \
-                                  % (os.path.basename(prev_running_script),
-                                     os.path.basename(running_script))
-    return cmd_line, options, ""
-
-
-def show_version():
-    options_storage.version(spades_version)
-    sys.exit(0)
-
-
-def show_usage(code, show_hidden=False):
-    options_storage.usage(spades_version, show_hidden=show_hidden)
-    sys.exit(code)
-
-
-def main(args):
-    os.environ["LC_ALL"] = "C"
-
-    if len(args) == 1:
-        show_usage(0)
-
-    log = logging.getLogger('spades')
-    log.setLevel(logging.DEBUG)
-
-    console = logging.StreamHandler(sys.stdout)
-    console.setFormatter(logging.Formatter('%(message)s'))
-    console.setLevel(logging.DEBUG)
-    log.addHandler(console)
-
-    support.check_binaries(bin_home, log)
-
-    # parse options and safe all parameters to cfg
-    options = args
-    cfg, dataset_data = fill_cfg(options, log)
-
-    if options_storage.continue_mode:
-        cmd_line, options, err_msg = get_options_from_params(os.path.join(options_storage.output_dir, "params.txt"), args[0])
-        if err_msg:
-            support.error(err_msg + " Please restart from the beginning or specify another output directory.")
-        cfg, dataset_data = fill_cfg(options, log, secondary_filling=True)
-        if options_storage.restart_from:
-            check_cfg_for_partial_run(cfg, type='restart-from')
-            options_storage.continue_mode = True
-        if options_storage.stop_after:
-            check_cfg_for_partial_run(cfg, type='stop-after')
-
-    log_filename = os.path.join(cfg["common"].output_dir, "spades.log")
-    if options_storage.continue_mode:
-        log_handler = logging.FileHandler(log_filename, mode='a')
-    else:
-        log_handler = logging.FileHandler(log_filename, mode='w')
-    log.addHandler(log_handler)
-
-    if options_storage.continue_mode:
-        log.info("\n======= SPAdes pipeline continued. Log can be found here: " + log_filename + "\n")
-        log.info("Restored from " + cmd_line)
-        if options_storage.restart_from:
-            updated_params = ""
-            skip_next = False
-            for v in args[1:]:
-                if v == '-o' or v == '--restart-from':
-                    skip_next = True
-                    continue
-                if skip_next or v.startswith('--restart-from='):  # you can specify '--restart-from=k33' but not '-o=out_dir'
-                    skip_next = False
-                    continue
-                updated_params += "\t" + v
-            updated_params = updated_params.strip()
-            log.info("with updated parameters: " + updated_params)
-            cmd_line += "\t" + updated_params
-        log.info("")
-
-    params_filename = os.path.join(cfg["common"].output_dir, "params.txt")
-    params_handler = logging.FileHandler(params_filename, mode='w')
-    log.addHandler(params_handler)
-
-    if options_storage.continue_mode:
-        log.info(cmd_line)
-    else:
-        command = "Command line: "
-        for v in args:
-            # substituting relative paths with absolute ones (read paths, output dir path, etc)
-            v, prefix = support.get_option_prefix(v)
-            if v in options_storage.dict_of_rel2abs.keys():
-                v = options_storage.dict_of_rel2abs[v]
-            if prefix:
-                command += prefix + ":"
-            command += v + "\t"
-        log.info(command)
-
-    # special case
-#    if "mismatch_corrector" in cfg and not support.get_lib_ids_by_type(dataset_data, 'paired-end'):
-#        support.warning('cannot perform mismatch correction without at least one paired-end library! Skipping this step.', log)
-#        del cfg["mismatch_corrector"]
-
-    print_used_values(cfg, log)
-    log.removeHandler(params_handler)
-
-    support.check_single_reads_in_options(options, log)
-
-    if not options_storage.continue_mode:
-        log.info("\n======= SPAdes pipeline started. Log can be found here: " + log_filename + "\n")
-
-    # splitting interlaced reads and processing Ns in additional contigs if needed
-    if support.dataset_has_interlaced_reads(dataset_data) or support.dataset_has_additional_contigs(dataset_data)\
-            or support.dataset_has_nxmate_reads(dataset_data):
-        dir_for_split_reads = os.path.join(options_storage.output_dir, 'split_input')
-        if support.dataset_has_interlaced_reads(dataset_data) or support.dataset_has_nxmate_reads(dataset_data):
-            if not os.path.isdir(dir_for_split_reads):
-                os.makedirs(dir_for_split_reads)
-            if support.dataset_has_interlaced_reads(dataset_data):
-                dataset_data = support.split_interlaced_reads(dataset_data, dir_for_split_reads, log)
-            if support.dataset_has_nxmate_reads(dataset_data):
-                dataset_data = support.process_nxmate_reads(dataset_data, dir_for_split_reads, log)
-        if support.dataset_has_additional_contigs(dataset_data):
-            dataset_data = support.process_Ns_in_additional_contigs(dataset_data, dir_for_split_reads, log)
-        options_storage.dataset_yaml_filename = os.path.join(options_storage.output_dir, "input_dataset.yaml")
-        pyyaml.dump(dataset_data, open(options_storage.dataset_yaml_filename, 'w'))
-        cfg["dataset"].yaml_filename = options_storage.dataset_yaml_filename
-
-    try:
-        # copying configs before all computations (to prevent its changing at run time)
-        tmp_configs_dir = os.path.join(cfg["common"].output_dir, "configs")
-        if os.path.isdir(tmp_configs_dir) and not options_storage.continue_mode:
-            shutil.rmtree(tmp_configs_dir)
-        if not os.path.isdir(tmp_configs_dir):
-            if options_storage.configs_dir:
-                dir_util.copy_tree(options_storage.configs_dir, tmp_configs_dir, preserve_times=False, preserve_mode=False)
-            else:
-                dir_util.copy_tree(os.path.join(spades_home, "configs"), tmp_configs_dir, preserve_times=False, preserve_mode=False)
-
-        corrected_dataset_yaml_filename = ''
-        if "error_correction" in cfg:
-            STAGE_NAME = "Read error correction"
-            bh_cfg = merge_configs(cfg["error_correction"], cfg["common"])
-            corrected_dataset_yaml_filename = os.path.join(bh_cfg.output_dir, "corrected.yaml")
-            ec_is_needed = True
-            only_compressing_is_needed = False
-            if os.path.isfile(corrected_dataset_yaml_filename) and options_storage.continue_mode \
-                    and not options_storage.restart_from == "ec":
-                if not bh_cfg.gzip_output or \
-                        support.dataset_has_gzipped_reads(pyyaml.load(open(corrected_dataset_yaml_filename, 'r'))):
-                    log.info("\n===== Skipping %s (already processed). \n" % STAGE_NAME)
-                    ec_is_needed = False
-                else:
-                    only_compressing_is_needed = True
-            if ec_is_needed:
-                if not only_compressing_is_needed:
-                    support.continue_from_here(log)
-
-                    if "HEAPCHECK" in os.environ:
-                        del os.environ["HEAPCHECK"]
-                    if "heap_check" in bh_cfg.__dict__:
-                        os.environ["HEAPCHECK"] = bh_cfg.heap_check
-
-                    if os.path.exists(bh_cfg.output_dir):
-                        shutil.rmtree(bh_cfg.output_dir)
-                    os.makedirs(bh_cfg.output_dir)
-
-                bh_cfg.__dict__["dataset_yaml_filename"] = cfg["dataset"].yaml_filename
-                log.info("\n===== %s started. \n" % STAGE_NAME)
-
-                hammer_logic.run_hammer(corrected_dataset_yaml_filename, tmp_configs_dir, bin_home, bh_cfg, dataset_data,
-                                        ext_python_modules_home, only_compressing_is_needed, log)
-                log.info("\n===== %s finished. \n" % STAGE_NAME)
-            if options_storage.stop_after == 'ec':
-                support.finish_here(log)
-
-        result_contigs_filename = os.path.join(cfg["common"].output_dir, options_storage.contigs_name)
-        result_scaffolds_filename = os.path.join(cfg["common"].output_dir, options_storage.scaffolds_name)
-        result_assembly_graph_filename = os.path.join(cfg["common"].output_dir, options_storage.assembly_graph_name)
-        result_assembly_graph_filename_gfa = os.path.join(cfg["common"].output_dir, options_storage.assembly_graph_name_gfa)
-        result_contigs_paths_filename = os.path.join(cfg["common"].output_dir, options_storage.contigs_paths)
-        result_scaffolds_paths_filename = os.path.join(cfg["common"].output_dir, options_storage.scaffolds_paths)
-        result_transcripts_filename = os.path.join(cfg["common"].output_dir, options_storage.transcripts_name)
-        result_transcripts_paths_filename = os.path.join(cfg["common"].output_dir, options_storage.transcripts_paths)
-        truseq_long_reads_file_base = os.path.join(cfg["common"].output_dir, "truseq_long_reads")
-        truseq_long_reads_file = truseq_long_reads_file_base + ".fasta"
-        misc_dir = os.path.join(cfg["common"].output_dir, "misc")
-        ### if mismatch correction is enabled then result contigs are copied to misc directory
-        assembled_contigs_filename = os.path.join(misc_dir, "assembled_contigs.fasta")
-        assembled_scaffolds_filename = os.path.join(misc_dir, "assembled_scaffolds.fasta")
-        if "assembly" in cfg and not options_storage.run_completed:
-            STAGE_NAME = "Assembling"
-            spades_cfg = merge_configs(cfg["assembly"], cfg["common"])
-            spades_cfg.__dict__["result_contigs"] = result_contigs_filename
-            spades_cfg.__dict__["result_scaffolds"] = result_scaffolds_filename
-            spades_cfg.__dict__["result_graph"] = result_assembly_graph_filename
-            spades_cfg.__dict__["result_graph_gfa"] = result_assembly_graph_filename_gfa
-            spades_cfg.__dict__["result_contigs_paths"] = result_contigs_paths_filename
-            spades_cfg.__dict__["result_scaffolds_paths"] = result_scaffolds_paths_filename
-            spades_cfg.__dict__["result_transcripts"] = result_transcripts_filename
-            spades_cfg.__dict__["result_transcripts_paths"] = result_transcripts_paths_filename
-
-            if options_storage.continue_mode and (os.path.isfile(spades_cfg.result_contigs)
-                                                  or ("mismatch_corrector" in cfg and
-                                                      os.path.isfile(assembled_contigs_filename))
-                                                  or (options_storage.truseq_mode and os.path.isfile(assembled_scaffolds_filename)))\
-                    and not options_storage.restart_from == 'as' \
-                    and not options_storage.restart_from == 'scc' \
-                    and not (options_storage.restart_from and options_storage.restart_from.startswith('k')):
-
-                log.info("\n===== Skipping %s (already processed). \n" % STAGE_NAME)
-                # calculating latest_dir for the next stages
-                latest_dir = support.get_latest_dir(os.path.join(spades_cfg.output_dir, "K*"))
-                if not latest_dir:
-                    support.error("failed to continue the previous run! Please restart from previous stages or from the beginning.", log)
-            else:
-                old_result_files = [result_contigs_filename, result_scaffolds_filename,
-                                    assembled_contigs_filename, assembled_scaffolds_filename]
-                for old_result_file in old_result_files:
-                    if os.path.isfile(old_result_file):
-                        os.remove(old_result_file)
-
-                if options_storage.restart_from == 'as':
-                    support.continue_from_here(log)
-
-                if os.path.isfile(corrected_dataset_yaml_filename):
-                    dataset_data = pyyaml.load(open(corrected_dataset_yaml_filename, 'r'))
-                    dataset_data = support.relative2abs_paths(dataset_data, os.path.dirname(corrected_dataset_yaml_filename))
-                if spades_cfg.disable_rr:
-                    spades_cfg.__dict__["rr_enable"] = False
-                else:
-                    spades_cfg.__dict__["rr_enable"] = True
-
-                if "HEAPCHECK" in os.environ:
-                    del os.environ["HEAPCHECK"]
-                if "heap_check" in spades_cfg.__dict__:
-                    os.environ["HEAPCHECK"] = spades_cfg.heap_check
-
-                log.info("\n===== %s started.\n" % STAGE_NAME)
-
-                # creating dataset
-                dataset_filename = os.path.join(spades_cfg.output_dir, "dataset.info")
-                if not os.path.isfile(dataset_filename) or not options_storage.continue_mode:
-                    dataset_file = open(dataset_filename, 'w')
-                    import process_cfg
-                    if os.path.isfile(corrected_dataset_yaml_filename):
-                        dataset_file.write("reads" + '\t' + process_cfg.process_spaces(corrected_dataset_yaml_filename) + '\n')
-                    else:
-                        dataset_file.write("reads" + '\t' + process_cfg.process_spaces(cfg["dataset"].yaml_filename) + '\n')
-                    if spades_cfg.developer_mode and "reference" in cfg["dataset"].__dict__:
-                        dataset_file.write("reference_genome" + '\t')
-                        dataset_file.write(process_cfg.process_spaces(cfg["dataset"].reference) + '\n')
-                    dataset_file.close()
-                spades_cfg.__dict__["dataset"] = dataset_filename
-
-                used_K = spades_logic.run_spades(tmp_configs_dir, bin_home, spades_cfg, dataset_data, ext_python_modules_home, log)
-
-                if os.path.isdir(misc_dir) and not options_storage.continue_mode:
-                    shutil.rmtree(misc_dir)
-                if not os.path.isdir(misc_dir):
-                    os.makedirs(misc_dir)
-
-            if options_storage.continue_mode and options_storage.restart_from and options_storage.restart_from.startswith('k'):
-                k_str = options_storage.restart_from[1:]
-                if k_str.find(":") != -1:
-                    k_str = k_str[:k_str.find(":")]
-                support.error("failed to continue from K=%s because this K was not processed in the original run!" % k_str, log)
-            log.info("\n===== %s finished. Used k-mer sizes: %s \n" % (STAGE_NAME, ', '.join(map(str, used_K))))
-        if not options_storage.run_completed:
-            if options_storage.stop_after == 'as' or options_storage.stop_after == 'scc' or (options_storage.stop_after and options_storage.stop_after.startswith('k')):
-                support.finish_here(log)
-
-        #postprocessing
-        if cfg["run_truseq_postprocessing"] and not options_storage.run_completed:
-            if options_storage.continue_mode and os.path.isfile(truseq_long_reads_file_base + ".fastq") and not options_storage.restart_from == 'tpp':
-                log.info("\n===== Skipping %s (already processed). \n" % "TruSeq postprocessing")
-            else:
-                support.continue_from_here(log)
-                if os.path.isfile(result_scaffolds_filename):
-                    shutil.move(result_scaffolds_filename, assembled_scaffolds_filename)
-                reads_library = dataset_data[0]
-                alignment_bin = os.path.join(bin_home, "bwa-spades")
-                alignment_dir = os.path.join(cfg["common"].output_dir, "alignment")
-                sam_files = alignment.align_bwa(alignment_bin, assembled_scaffolds_filename, dataset_data, alignment_dir, log, options_storage.threads)
-                moleculo_postprocessing.moleculo_postprocessing(assembled_scaffolds_filename, truseq_long_reads_file_base, sam_files, log)
-            if options_storage.stop_after == 'tpp':
-                support.finish_here(log)
-
-        #corrector
-        if "mismatch_corrector" in cfg and not options_storage.run_completed and \
-                (os.path.isfile(result_contigs_filename) or
-                 (options_storage.continue_mode and os.path.isfile(assembled_contigs_filename))):
-            STAGE_NAME = "Mismatch correction"
-            to_correct = dict()
-            to_correct["contigs"] = (result_contigs_filename, assembled_contigs_filename)
-            if os.path.isfile(result_scaffolds_filename) or (options_storage.continue_mode and
-                                                             os.path.isfile(assembled_scaffolds_filename)):
-                to_correct["scaffolds"] = (result_scaffolds_filename, assembled_scaffolds_filename)
-
-            # moving assembled contigs (scaffolds) to misc dir
-            for assembly_type, (old, new) in to_correct.items():
-                if options_storage.continue_mode and os.path.isfile(new):
-                    continue
-                if os.path.isfile(old):
-                    shutil.move(old, new)
-
-            if options_storage.continue_mode and os.path.isfile(result_contigs_filename) and \
-                    (os.path.isfile(result_scaffolds_filename) or not os.path.isfile(assembled_scaffolds_filename)) \
-                    and not options_storage.restart_from == 'mc':
-                log.info("\n===== Skipping %s (already processed). \n" % STAGE_NAME)
-            else:
-                if options_storage.restart_from == 'mc':
-                    support.continue_from_here(log)
-
-                log.info("\n===== %s started." % STAGE_NAME)
-                # detecting paired-end library with the largest insert size
-                cfg["mismatch_corrector"].__dict__["dataset"] = cfg["dataset"].yaml_filename
-                #TODO: add reads orientation
-
-                import corrector_logic
-                corrector_cfg = cfg["mismatch_corrector"]
-                # processing contigs and scaffolds (or only contigs)
-                for assembly_type, (corrected, assembled) in to_correct.items():
-                    if options_storage.continue_mode and os.path.isfile(corrected):
-                        log.info("\n== Skipping processing of " + assembly_type + " (already processed)\n")
-                        continue
-                    if not os.path.isfile(assembled) or os.path.getsize(assembled) == 0:
-                        log.info("\n== Skipping processing of " + assembly_type + " (empty file)\n")
-                        continue
-                    support.continue_from_here(log)
-                    log.info("\n== Processing of " + assembly_type + "\n")
-
-                    tmp_dir_for_corrector = os.path.join(cfg["common"].output_dir, "mismatch_corrector", assembly_type)
-
-                    cfg["mismatch_corrector"].__dict__["output_dir"] = tmp_dir_for_corrector
-                    # correcting
-                    corr_cfg = merge_configs(cfg["mismatch_corrector"], cfg["common"])
-
-                    result_corrected_filename = os.path.join(tmp_dir_for_corrector, "corrected_contigs.fasta")
-                    corrector_logic.run_corrector( tmp_configs_dir, bin_home, corr_cfg,
-                                                   ext_python_modules_home, log, assembled, result_corrected_filename)
-
-                    if os.path.isfile(result_corrected_filename):
-                        shutil.copyfile(result_corrected_filename, corrected)
-                    tmp_d = os.path.join(tmp_dir_for_corrector, "tmp")
-                    if os.path.isdir(tmp_d) and not cfg["common"].developer_mode:
-                        shutil.rmtree(tmp_d)
-                log.info("\n===== %s finished.\n" % STAGE_NAME)
-            if options_storage.stop_after == 'mc':
-                support.finish_here(log)
-
-        if not cfg["common"].developer_mode and os.path.isdir(tmp_configs_dir):
-            shutil.rmtree(tmp_configs_dir)
-
-        if not options_storage.run_completed:
-            #log.info("")
-            if "error_correction" in cfg and os.path.isdir(os.path.dirname(corrected_dataset_yaml_filename)):
-                log.info(" * Corrected reads are in " + support.process_spaces(os.path.dirname(corrected_dataset_yaml_filename) + "/"))
-            if "assembly" in cfg and os.path.isfile(result_contigs_filename):
-                message = " * Assembled contigs are in " + support.process_spaces(result_contigs_filename)
-                log.info(message)
-            if options_storage.rna:
-                if "assembly" in cfg and os.path.isfile(result_transcripts_filename):
-                    message = " * Assembled transcripts are in " + support.process_spaces(result_transcripts_filename)
-                    log.info(message)
-                if "assembly" in cfg and os.path.isfile(result_transcripts_paths_filename):
-                    message = " * Paths in the assembly graph corresponding to the transcripts are in " + \
-                              support.process_spaces(result_transcripts_paths_filename)
-                    log.info(message)
-            else:
-                if "assembly" in cfg and os.path.isfile(result_scaffolds_filename):
-                    message = " * Assembled scaffolds are in " + support.process_spaces(result_scaffolds_filename)
-                    log.info(message)
-                if "assembly" in cfg and os.path.isfile(result_assembly_graph_filename):
-                    message = " * Assembly graph is in " + support.process_spaces(result_assembly_graph_filename)
-                    log.info(message)
-                if "assembly" in cfg and os.path.isfile(result_assembly_graph_filename_gfa):
-                    message = " * Assembly graph in GFA format is in " + support.process_spaces(result_assembly_graph_filename_gfa)
-                    log.info(message)
-                if "assembly" in cfg and os.path.isfile(result_contigs_paths_filename):
-                    message = " * Paths in the assembly graph corresponding to the contigs are in " + \
-                              support.process_spaces(result_contigs_paths_filename)
-                    log.info(message)
-                if "assembly" in cfg and os.path.isfile(result_scaffolds_paths_filename):
-                    message = " * Paths in the assembly graph corresponding to the scaffolds are in " + \
-                              support.process_spaces(result_scaffolds_paths_filename)
-                    log.info(message)
-            #log.info("")
-
-        #breaking scaffolds
-        if os.path.isfile(result_scaffolds_filename):
-            if not os.path.isdir(misc_dir):
-                os.makedirs(misc_dir)
-            result_broken_scaffolds = os.path.join(misc_dir, "broken_scaffolds.fasta")
-            if not os.path.isfile(result_broken_scaffolds) or not options_storage.continue_mode:
-                modified, broken_scaffolds = support.break_scaffolds(result_scaffolds_filename,
-                                                                     options_storage.THRESHOLD_FOR_BREAKING_SCAFFOLDS)
-                if modified:
-                    support.write_fasta(result_broken_scaffolds, broken_scaffolds)
-                    #log.info(" * Scaffolds broken by " + str(options_storage.THRESHOLD_FOR_BREAKING_SCAFFOLDS) +
-                    #         " Ns are in " + result_broken_scaffolds)
-
-        ### printing WARNINGS SUMMARY
-        if not support.log_warnings(log):
-            log.info("\n======= SPAdes pipeline finished.")  # otherwise it finished WITH WARNINGS
-
-        if options_storage.test_mode:
-            if options_storage.truseq_mode:
-                if not os.path.isfile(truseq_long_reads_file):
-                    support.error("TEST FAILED: %s does not exist!" % truseq_long_reads_file)
-            elif options_storage.rna:
-                if not os.path.isfile(result_transcripts_filename):
-                    support.error("TEST FAILED: %s does not exist!" % result_transcripts_filename)
-            else:
-                for result_filename in [result_contigs_filename, result_scaffolds_filename]:
-                    if os.path.isfile(result_filename):
-                        result_fasta = list(support.read_fasta(result_filename))
-                        # correctness check: should be one contig of length 1000 bp
-                        correct_number = 1
-                        if options_storage.plasmid:
-                            correct_length = 9667
-                        else:
-                            correct_length = 1000
-                        if not len(result_fasta):
-                            support.error("TEST FAILED: %s does not contain contigs!" % result_filename)
-                        elif len(result_fasta) > correct_number:
-                            support.error("TEST FAILED: %s contains more than %d contig (%d)!" %
-                                          (result_filename, correct_number, len(result_fasta)))
-                        elif len(result_fasta[0][1]) != correct_length:
-                            if len(result_fasta[0][1]) > correct_length:
-                                relation = "more"
-                            else:
-                                relation = "less"
-                            support.error("TEST FAILED: %s contains %s than %d bp (%d bp)!" %
-                                          (result_filename, relation, correct_length, len(result_fasta[0][1])))
-                    else:
-                        support.error("TEST FAILED: " + result_filename + " does not exist!")
-            log.info("\n========= TEST PASSED CORRECTLY.")
-
-
-        log.info("\nSPAdes log can be found here: " + log_filename)
-        log.info("")
-        log.info("Thank you for using SPAdes!")
-        log.removeHandler(log_handler)
-
-    except Exception:
-        exc_type, exc_value, _ = sys.exc_info()
-        if exc_type == SystemExit:
-            sys.exit(exc_value)
-        else:
-            if exc_type == OSError and exc_value.errno == errno.ENOEXEC:  # Exec format error
-                support.error("It looks like you are using SPAdes binaries for another platform.\n" +
-                              support.get_spades_binaries_info_message())
-            else:
-                log.exception(exc_value)
-                support.error("exception caught: %s" % exc_type, log)
-    except BaseException:  # since python 2.5 system-exiting exceptions (e.g. KeyboardInterrupt) are derived from BaseException
-        exc_type, exc_value, _ = sys.exc_info()
-        if exc_type == SystemExit:
-            sys.exit(exc_value)
-        else:
-            log.exception(exc_value)
-            support.error("exception caught: %s" % exc_type, log)
-
-
-if __name__ == '__main__':
-    main(sys.argv)
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/VERSION b/src/SPAdes-3.10.1-Linux/share/spades/VERSION
deleted file mode 100644
index f870be2..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/VERSION
+++ /dev/null
@@ -1 +0,0 @@
-3.10.1
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/AFUMIGATUS_AF1163_QUAKE.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/AFUMIGATUS_AF1163_QUAKE.info
deleted file mode 100644
index 8ff2654..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/AFUMIGATUS_AF1163_QUAKE.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads AFUMIGATUS_AF1163_QUAKE.yaml
-single_cell false
-
-; RL 100
-; IS 185
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/AFUMIGATUS_AF293_QUAKE.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/AFUMIGATUS_AF293_QUAKE.info
deleted file mode 100644
index 057ec34..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/AFUMIGATUS_AF293_QUAKE.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads AFUMIGATUS_AF293_QUAKE.yaml
-single_cell false
-
-; RL 100
-; IS 200
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/BFAECIUM_QUAKE.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/BFAECIUM_QUAKE.info
deleted file mode 100644
index aba3819..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/BFAECIUM_QUAKE.info
+++ /dev/null
@@ -1,8 +0,0 @@
-reads BFAECIUM_QUAKE.yaml
-single_cell false
-reference_genome ../../../data/input/B.faecium/ref.fasta.gz
-
-; RL 150
-; IS 250
-; jump_rl 150
-; jump_is 7450
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/CSMUELLERI_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/CSMUELLERI_BH.info
deleted file mode 100644
index 7a0a1ce..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/CSMUELLERI_BH.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads CSMUELLERI_BH.yaml
-single_cell false
-reference_genome ../../../data/input/C.S.muelleri/4085179.chromosome1.final.fasta.gz
-
-; RL 36
-; IS 36
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/DELTA_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/DELTA_BH.info
deleted file mode 100644
index f316bfc..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/DELTA_BH.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads DELTA_BH.yaml
-single_cell true
-
-; RL 100
-; IS 250
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/DMELAGONASTER.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/DMELAGONASTER.info
deleted file mode 100644
index 9397cb6..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/DMELAGONASTER.info
+++ /dev/null
@@ -1,4 +0,0 @@
-reads DMELAGONASTER.yaml
-single_cell false
-reference_genome "/storage/data/input/D.melanogaster/dmel-all-chromosome-r5.47.fasta.gz"
-
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_IS220_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_IS220_BH.info
deleted file mode 100644
index 4b8273b..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_IS220_BH.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads ECOLI_IS220_BH.yaml
-single_cell false
-reference_genome /acestorage/data/input/Bacteria/E.coli/K12/MG1655-K12.fasta.gz
-
-; RL 100
-; IS 220
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_IS220_BH_iter4.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_IS220_BH_iter4.info
deleted file mode 100644
index 876c549..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_IS220_BH_iter4.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads ECOLI_IS220_BH_iter4.yaml
-single_cell true
-reference_genome ../../../data/input/Bacteria/E.coli/K12/MG1655-K12.fasta.gz
-
-; RL 100
-; IS 270
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_IS220_IDEAL.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_IS220_IDEAL.info
deleted file mode 100644
index f1eb60b..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_IS220_IDEAL.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads ECOLI_IS220_IDEAL.yaml
-single_cell false
-reference_genome ../../../data/input/Bacteria/E.coli/K12/MG1655-K12.fasta.gz
-
-; RL 100
-; IS 220
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_IS220_QUAKE.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_IS220_QUAKE.info
deleted file mode 100644
index 4c3c55d..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_IS220_QUAKE.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads ECOLI_IS220_QUAKE.yaml
-single_cell false
-reference_genome ../../../data/input/Bacteria/E.coli/K12/MG1655-K12.fasta.gz
-
-; RL 100
-; IS 220
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_IS220_QUAKE_PB.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_IS220_QUAKE_PB.info
deleted file mode 100644
index 8ce210e..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_IS220_QUAKE_PB.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads ECOLI_IS220_QUAKE_PB.yaml
-single_cell false
-;reference_genome ../../../data/input/Bacteria/E.coli/K12/MG1655-K12.fasta.gz
-
-; RL 100
-; IS 220
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_IS480_QUAKE.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_IS480_QUAKE.info
deleted file mode 100644
index edf98a3..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_IS480_QUAKE.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads ECOLI_IS480_QUAKE.yaml
-single_cell false
-reference_genome ../../../data/input/Bacteria/E.coli/K12/MG1655-K12.fasta.gz
-
-; RL 100
-; IS 480
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_JCVI_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_JCVI_BH.info
deleted file mode 100644
index 4d81720..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_JCVI_BH.info
+++ /dev/null
@@ -1,7 +0,0 @@
-reads ECOLI_JCVI_BH.yaml
-single_cell true
-reference_genome ../../../data/input/E.coli/MG1655-K12.fasta.gz
-
-; RL 100
-; IS 210
-; jump_is 3000
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_JCVI_BIG_JUMP_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_JCVI_BIG_JUMP_BH.info
deleted file mode 100644
index 1485d0c..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_JCVI_BIG_JUMP_BH.info
+++ /dev/null
@@ -1,7 +0,0 @@
-reads ECOLI_JCVI_BIG_JUMP_BH.yaml
-single_cell true
-reference_genome ../../../data/input/E.coli/MG1655-K12.fasta.gz
-
-; RL 100
-; IS 210
-; jump_is 3000
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_JGI.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_JGI.info
deleted file mode 100644
index 388beda..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_JGI.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads ECOLI_JGI.yaml
-single_cell true
-reference_genome ../../../data/input/E.coli/MG1655-K12.fasta.gz
-
-; RL 100
-; IS 220
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_JGI_0005_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_JGI_0005_BH.info
deleted file mode 100644
index 02fd5a4..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_JGI_0005_BH.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads ECOLI_JGI_0005_BH.yaml
-single_cell true
-reference_genome ../../../data/input/E.coli/MG1655-K12.fasta.gz
-
-; RL 100
-; IS 220
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_JGI_0011_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_JGI_0011_BH.info
deleted file mode 100644
index 1f43879..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_JGI_0011_BH.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads ECOLI_JGI_0011_BH.yaml
-single_cell true
-reference_genome ../../../data/input/E.coli/MG1655-K12.fasta.gz
-
-; RL 100
-; IS 220
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_JGI_0012_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_JGI_0012_BH.info
deleted file mode 100644
index 9d9ed84..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_JGI_0012_BH.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads ECOLI_JGI_0012_BH.yaml
-single_cell true
-reference_genome ../../../data/input/E.coli/MG1655-K12.fasta.gz
-
-; RL 100
-; IS 220
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_JGI_0012_noBH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_JGI_0012_noBH.info
deleted file mode 100644
index b57f712..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_JGI_0012_noBH.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads ECOLI_JGI_0012_noBH.yaml
-single_cell true
-reference_genome ../../../data/input/E.coli/MG1655-K12.fasta.gz
-
-; RL 100
-; IS 220
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_RL36.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_RL36.info
deleted file mode 100644
index 3022d18..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_RL36.info
+++ /dev/null
@@ -1,7 +0,0 @@
-paired_reads "../../../data/input/E.coli/sc_lane_1/bh20130116/ecoli_mda_lane1_left.fastq.00.cor.fastq.gz ../../../data/input/E.coli/sc_lane_1/bh20130116/ecoli_mda_lane1_right.fastq.00.cor.fastq.gz "
-single_reads "../../../data/input/E.coli/sc_lane_1/bh20130116/ecoli_mda_lane1_unpaired.00.cor.fastq.gz "
-RL 100
-IS 270
-single_cell true
-
-reads /home/lab42/algorithmic-biology/assembler/configs/debruijn/datasets/ECOLI_RL36.yaml
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_SC_LANE_1_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_SC_LANE_1_BH.info
deleted file mode 100644
index 0e9051c..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_SC_LANE_1_BH.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads ECOLI_SC_LANE_1_BH.yaml
-single_cell true
-reference_genome ../../../data/input/Bacteria/E.coli/K12/MG1655-K12.fasta.gz
-
-; RL 100
-; IS 270
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_SC_LANE_1_BH_woHUMAN.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_SC_LANE_1_BH_woHUMAN.info
deleted file mode 100644
index 24d8f91..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/ECOLI_SC_LANE_1_BH_woHUMAN.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads ECOLI_SC_LANE_1_BH_woHUMAN.yaml
-single_cell true
-reference_genome ../../../data/input/Bacteria/E.coli/K12/MG1655-K12.fasta.gz
-
-; RL 100
-; IS 270
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/GEBA_1_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/GEBA_1_BH.info
deleted file mode 100644
index 058f990..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/GEBA_1_BH.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads GEBA_1_BH.yaml
-single_cell true
-
-; RL 150
-; IS 300
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/GEBA_2_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/GEBA_2_BH.info
deleted file mode 100644
index c3a16b8..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/GEBA_2_BH.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads GEBA_2_BH.yaml
-single_cell true
-
-; RL 150
-; IS 300
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/GEBA_3_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/GEBA_3_BH.info
deleted file mode 100644
index 0502606..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/GEBA_3_BH.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads GEBA_3_BH.yaml
-single_cell true
-
-; RL 150
-; IS 300
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/GEBA_4_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/GEBA_4_BH.info
deleted file mode 100644
index c87c8a0..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/GEBA_4_BH.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads GEBA_4_BH.yaml
-single_cell true
-
-; RL 150
-; IS 300
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/GEBA_5_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/GEBA_5_BH.info
deleted file mode 100644
index 2703ff3..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/GEBA_5_BH.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads GEBA_5_BH.yaml
-single_cell true
-
-; RL 150
-; IS 300
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/GEBA_6_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/GEBA_6_BH.info
deleted file mode 100644
index b8f6e06..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/GEBA_6_BH.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads GEBA_6_BH.yaml
-single_cell true
-
-; RL 150
-; IS 300
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/HMP_LANE_1_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/HMP_LANE_1_BH.info
deleted file mode 100644
index 20b0b60..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/HMP_LANE_1_BH.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads HMP_LANE_1_BH.yaml
-single_cell true
-
-; RL 100
-; IS 210
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/HMP_LANE_2_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/HMP_LANE_2_BH.info
deleted file mode 100644
index 2d6be94..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/HMP_LANE_2_BH.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads HMP_LANE_2_BH.yaml
-single_cell true
-
-; RL 100
-; IS 210
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/HMP_LANE_2_JUMPING_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/HMP_LANE_2_JUMPING_BH.info
deleted file mode 100644
index 2a6ea1a..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/HMP_LANE_2_JUMPING_BH.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads HMP_LANE_2_JUMPING_BH.yaml
-single_cell true
-
-; RL 100
-; IS 210
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/HMP_LANE_3_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/HMP_LANE_3_BH.info
deleted file mode 100644
index 5be6b6c..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/HMP_LANE_3_BH.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads HMP_LANE_3_BH.yaml
-single_cell true
-
-; RL 100
-; IS 210
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/HMP_LANE_4_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/HMP_LANE_4_BH.info
deleted file mode 100644
index 924425c..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/HMP_LANE_4_BH.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads HMP_LANE_4_BH.yaml
-single_cell true
-
-; RL 100
-; IS 210
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/HMP_LANE_7_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/HMP_LANE_7_BH.info
deleted file mode 100644
index 1d6809e..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/HMP_LANE_7_BH.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads HMP_LANE_7_BH.yaml
-single_cell true
-
-; RL 100
-; IS 210
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/HMP_LANE_8_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/HMP_LANE_8_BH.info
deleted file mode 100644
index 9ebe0a0..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/HMP_LANE_8_BH.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads HMP_LANE_8_BH.yaml
-single_cell true
-
-; RL 100
-; IS 210
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/HUMAN_CHR14_QUAKE.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/HUMAN_CHR14_QUAKE.info
deleted file mode 100644
index dcb6ae6..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/HUMAN_CHR14_QUAKE.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads HUMAN_CHR14_QUAKE.yaml
-single_cell false
-
-; RL 101
-; IS 155
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/HUMAN_CHR14_QUAKE_CROPPED_5M.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/HUMAN_CHR14_QUAKE_CROPPED_5M.info
deleted file mode 100644
index 3a2c7a9..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/HUMAN_CHR14_QUAKE_CROPPED_5M.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads HUMAN_CHR14_QUAKE_CROPPED_5M.yaml
-single_cell false
-
-; RL 101
-; IS 155
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/LBOUILLONII_QUAKE.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/LBOUILLONII_QUAKE.info
deleted file mode 100644
index c91f831..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/LBOUILLONII_QUAKE.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads LBOUILLONII_QUAKE.yaml
-single_cell false
-
-; RL 100
-; IS 370
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/LGASSERI_QUAKE.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/LGASSERI_QUAKE.info
deleted file mode 100644
index 79285dc..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/LGASSERI_QUAKE.info
+++ /dev/null
@@ -1,8 +0,0 @@
-reads LGASSERI_QUAKE.yaml
-single_cell false
-reference_genome ../../../data/input/L.gasseri/ref.fasta.gz
-
-; RL 150
-; IS 250
-; jump_rl 150
-; jump_is 7450
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/PGINGIVALIS_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/PGINGIVALIS_BH.info
deleted file mode 100644
index cda88ee..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/PGINGIVALIS_BH.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads PGINGIVALIS_BH.yaml
-single_cell true
-reference_genome ../../../data/input/P.gingivalis/TDC60.fasta.gz
-
-; RL 100
-; IS 270
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/PGINGIVALIS_LANE1_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/PGINGIVALIS_LANE1_BH.info
deleted file mode 100644
index 4c6026c..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/PGINGIVALIS_LANE1_BH.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads PGINGIVALIS_LANE1_BH.yaml
-single_cell true
-reference_genome ../../../data/input/P.gingivalis/TDC60.fasta.gz
-
-; RL 100
-; IS 270
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/PGINGIVALIS_LANE2_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/PGINGIVALIS_LANE2_BH.info
deleted file mode 100644
index 831a7d8..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/PGINGIVALIS_LANE2_BH.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads PGINGIVALIS_LANE2_BH.yaml
-single_cell true
-reference_genome ../../../data/input/P.gingivalis/TDC60.fasta.gz
-
-; RL 100
-; IS 270
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/PGINGIVALIS_LANE3_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/PGINGIVALIS_LANE3_BH.info
deleted file mode 100644
index 4bd213e..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/PGINGIVALIS_LANE3_BH.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads PGINGIVALIS_LANE3_BH.yaml
-single_cell true
-reference_genome ../../../data/input/P.gingivalis/TDC60.fasta.gz
-
-; RL 100
-; IS 270
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/PGINGIVALIS_LANE4_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/PGINGIVALIS_LANE4_BH.info
deleted file mode 100644
index faba691..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/PGINGIVALIS_LANE4_BH.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads PGINGIVALIS_LANE4_BH.yaml
-single_cell true
-reference_genome ../../../data/input/P.gingivalis/TDC60.fasta.gz
-
-; RL 100
-; IS 270
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/PGINGIVALIS_LANE5_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/PGINGIVALIS_LANE5_BH.info
deleted file mode 100644
index 79b945e..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/PGINGIVALIS_LANE5_BH.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads PGINGIVALIS_LANE5_BH.yaml
-single_cell true
-reference_genome ../../../data/input/P.gingivalis/TDC60.fasta.gz
-
-; RL 100
-; IS 270
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/PGINGIVALIS_LANE6_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/PGINGIVALIS_LANE6_BH.info
deleted file mode 100644
index f74de8b..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/PGINGIVALIS_LANE6_BH.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads PGINGIVALIS_LANE6_BH.yaml
-single_cell true
-reference_genome ../../../data/input/P.gingivalis/TDC60.fasta.gz
-
-; RL 100
-; IS 270
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/PSTIPITIS_QUAKE.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/PSTIPITIS_QUAKE.info
deleted file mode 100644
index 68ce342..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/PSTIPITIS_QUAKE.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads PSTIPITIS_QUAKE.yaml
-single_cell false
-reference_genome ../../../data/input/P.stipitis/Pichia_stipitis_Sanger_reference.fa.gz
-
-; RL 75
-; IS 280
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/SAUREUS_BENCHMARK_QUAKE.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/SAUREUS_BENCHMARK_QUAKE.info
deleted file mode 100644
index 6e4bfa1..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/SAUREUS_BENCHMARK_QUAKE.info
+++ /dev/null
@@ -1,7 +0,0 @@
-reads SAUREUS_BENCHMARK_QUAKE.yaml
-single_cell false
-reference_genome ../../../data/input/S.aureus/USA300_FPR3757.fasta.gz
-
-; RL 100
-; IS 170
-; delta 21
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/SAUREUS_JCVI_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/SAUREUS_JCVI_BH.info
deleted file mode 100644
index f26804a..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/SAUREUS_JCVI_BH.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads SAUREUS_JCVI_BH.yaml
-single_cell true
-reference_genome ../../../data/input/S.aureus/USA300_FPR3757.fasta.gz
-
-; RL 100
-; IS 220
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/SAUREUS_JCVI_BH_CLEANED_HUMAN_ECOLI_PLASMID.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/SAUREUS_JCVI_BH_CLEANED_HUMAN_ECOLI_PLASMID.info
deleted file mode 100644
index 9c0bbb6..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/SAUREUS_JCVI_BH_CLEANED_HUMAN_ECOLI_PLASMID.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads SAUREUS_JCVI_BH_CLEANED_HUMAN_ECOLI_PLASMID.yaml
-single_cell true
-
-; RL 100
-; IS 207
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/SAUREUS_SC_LANE_7_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/SAUREUS_SC_LANE_7_BH.info
deleted file mode 100644
index 8ed5214..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/SAUREUS_SC_LANE_7_BH.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads SAUREUS_SC_LANE_7_BH.yaml
-single_cell true
-reference_genome ../../../data/input/S.aureus/USA300_FPR3757.fasta.gz
-
-; RL 100
-; IS 300
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/SSMARAGDINAE_QUAKE.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/SSMARAGDINAE_QUAKE.info
deleted file mode 100644
index dd4fd88..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/SSMARAGDINAE_QUAKE.info
+++ /dev/null
@@ -1,8 +0,0 @@
-reads SSMARAGDINAE_QUAKE.yaml
-single_cell false
-reference_genome ../../../data/input/S.smaragdinae/ref.fasta.gz
-
-; RL 150
-; IS 380
-; jump_rl 75
-; jump_is 6700
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/TM6_1_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/TM6_1_BH.info
deleted file mode 100644
index 20e9ab8..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/TM6_1_BH.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads TM6_1_BH.yaml
-single_cell true
-
-; RL 100
-; IS 270
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/TM6_1_PB_amp40.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/TM6_1_PB_amp40.info
deleted file mode 100644
index c7f9062..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/TM6_1_PB_amp40.info
+++ /dev/null
@@ -1,7 +0,0 @@
-paired_reads "../../../data/input/TM6/BH1/09.reads.0.right.corrected.fastq ../../../data/input/TM6/BH1/09.reads.0.left.corrected.fastq "
-single_reads "../../../data/input/TM6/BH1/09.reads.0.left.unpaired.fastq ../../../data/input/TM6/BH1/09.reads.0.right.unpaired.fastq "
-RL 100
-IS 270
-single_cell true
-
-reads TM6_1_PB_amp40.yaml
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/TM6_2_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/TM6_2_BH.info
deleted file mode 100644
index ad9caf3..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/TM6_2_BH.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads TM6_2_BH.yaml
-single_cell true
-
-; RL 100
-; IS 270
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/TM6_3_BH.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/TM6_3_BH.info
deleted file mode 100644
index b22c46f..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/TM6_3_BH.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads TM6_3_BH.yaml
-single_cell true
-
-; RL 100
-; IS 270
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/UNK_YALE_B02_BH4.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/UNK_YALE_B02_BH4.info
deleted file mode 100644
index 444246a..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/UNK_YALE_B02_BH4.info
+++ /dev/null
@@ -1,4 +0,0 @@
-reads UNK_YALE_B02_BH4.yaml
-single_cell true
-
-; RL 101
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/UNK_YALE_C04_BH4.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/UNK_YALE_C04_BH4.info
deleted file mode 100644
index bb96cfc..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/UNK_YALE_C04_BH4.info
+++ /dev/null
@@ -1,4 +0,0 @@
-reads UNK_YALE_C04_BH4.yaml
-single_cell true
-
-; RL 101
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/UNK_YALE_I20_BH4.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/UNK_YALE_I20_BH4.info
deleted file mode 100644
index 6c1036e..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/UNK_YALE_I20_BH4.info
+++ /dev/null
@@ -1,4 +0,0 @@
-reads UNK_YALE_I20_BH4.yaml
-single_cell true
-
-; RL 101
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/UNK_YALE_I22_BH4.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/UNK_YALE_I22_BH4.info
deleted file mode 100644
index f07f0b8..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/UNK_YALE_I22_BH4.info
+++ /dev/null
@@ -1,4 +0,0 @@
-reads UNK_YALE_I22_BH4.yaml
-single_cell true
-
-; RL 101
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/UNK_YALE_J21_BH4.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/UNK_YALE_J21_BH4.info
deleted file mode 100644
index 4b403f0..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/UNK_YALE_J21_BH4.info
+++ /dev/null
@@ -1,4 +0,0 @@
-reads UNK_YALE_J21_BH4.yaml
-single_cell true
-
-; RL 101
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/UNK_YALE_L16_BH4.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/UNK_YALE_L16_BH4.info
deleted file mode 100644
index bf13d89..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/UNK_YALE_L16_BH4.info
+++ /dev/null
@@ -1,4 +0,0 @@
-reads UNK_YALE_L16_BH4.yaml
-single_cell true
-
-; RL 101
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/UNK_YALE_O11_BH4.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/UNK_YALE_O11_BH4.info
deleted file mode 100644
index ee74f5b..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/UNK_YALE_O11_BH4.info
+++ /dev/null
@@ -1,4 +0,0 @@
-reads UNK_YALE_O11_BH4.yaml
-single_cell true
-
-; RL 101
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/UNK_YALE_P14_BH4.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/UNK_YALE_P14_BH4.info
deleted file mode 100644
index d376186..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/UNK_YALE_P14_BH4.info
+++ /dev/null
@@ -1,4 +0,0 @@
-reads UNK_YALE_P14_BH4.yaml
-single_cell true
-
-; RL 101
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/UNK_YALE_P17_BH4.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/UNK_YALE_P17_BH4.info
deleted file mode 100644
index 510a19b..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/UNK_YALE_P17_BH4.info
+++ /dev/null
@@ -1,4 +0,0 @@
-reads UNK_YALE_P17_BH4.yaml
-single_cell true
-
-; RL 101
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/X5_A_QUAKE.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/X5_A_QUAKE.info
deleted file mode 100644
index 9852fa3..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/X5_A_QUAKE.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads X5_A_QUAKE.yaml
-single_cell false
-
-; RL 100
-; IS 175
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/X5_B_QUAKE.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/X5_B_QUAKE.info
deleted file mode 100644
index dc27cfc..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets/X5_B_QUAKE.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads X5_B_QUAKE.yaml
-single_cell false
-
-; RL 100
-; IS 280
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_EMUL.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_EMUL.info
deleted file mode 100644
index 0ffe427..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_EMUL.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads ECOLI_EMUL.yaml
-single_cell false
-reference_genome ../../../data/input/Bacteria/E.coli/K12/MG1655-K12.fasta.gz
-
-; RL 100
-; IS 400
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_IS220_QUAKE_100K.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_IS220_QUAKE_100K.info
deleted file mode 100644
index b14d31e..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_IS220_QUAKE_100K.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads ECOLI_IS220_QUAKE_100K.yaml
-single_cell false
-reference_genome ../../../data/input/Bacteria/E.coli/K12/MG1655-K12.first100K.fasta.gz
-
-; RL 100
-; IS 220
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_IS220_QUAKE_10K.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_IS220_QUAKE_10K.info
deleted file mode 100644
index 3449570..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_IS220_QUAKE_10K.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads ECOLI_IS220_QUAKE_10K.yaml
-single_cell false
-
-; RL 100
-; IS 220
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_IS220_QUAKE_1K.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_IS220_QUAKE_1K.info
deleted file mode 100644
index 64c5c14..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_IS220_QUAKE_1K.info
+++ /dev/null
@@ -1,4 +0,0 @@
-reads ECOLI_IS220_QUAKE_1K.yaml
-single_cell false
-
-; RL 100
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_IS220_QUAKE_400K.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_IS220_QUAKE_400K.info
deleted file mode 100644
index 1a78b4d..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_IS220_QUAKE_400K.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads ECOLI_IS220_QUAKE_400K.yaml
-single_cell false
-reference_genome ../../../data/input/Bacteria/E.coli/K12/MG1655-K12.first400K.fasta.gz
-; RL 100
-; IS 220
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_IS480_QUAKE_1000K.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_IS480_QUAKE_1000K.info
deleted file mode 100644
index 54aa2e3..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_IS480_QUAKE_1000K.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads ECOLI_IS480_QUAKE_1000K.yaml
-single_cell false
-reference_genome ../../../data/input/Bacteria/E.coli/K12/MG1655-K12.first1000K.fasta.gz
-
-; RL 100
-; IS 480
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_IS480_QUAKE_100K.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_IS480_QUAKE_100K.info
deleted file mode 100644
index e5c2162..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_IS480_QUAKE_100K.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads ECOLI_IS480_QUAKE_100K.yaml
-single_cell false
-reference_genome ../../../data/input/Bacteria/E.coli/K12/MG1655-K12.first100K.fasta.gz
-
-; RL 100
-; IS 480
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_IS480_QUAKE_10K.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_IS480_QUAKE_10K.info
deleted file mode 100644
index 5da3543..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_IS480_QUAKE_10K.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads ECOLI_IS480_QUAKE_10K.yaml
-single_cell false
-reference_genome ../../../data/input/Bacteria/E.coli/K12/MG1655-K12.first10K.fasta.gz
-
-; RL 100
-; IS 480
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_IS480_QUAKE_1K.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_IS480_QUAKE_1K.info
deleted file mode 100644
index c3e73c6..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_IS480_QUAKE_1K.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads ECOLI_IS480_QUAKE_1K.yaml
-single_cell false
-reference_genome ../../../data/input/Bacteria/E.coli/K12/MG1655-K12.first1K.fasta.gz
-
-; RL 100
-; IS 480
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_IS480_QUAKE_400K.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_IS480_QUAKE_400K.info
deleted file mode 100644
index 5b01cab..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/ECOLI_IS480_QUAKE_400K.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads ECOLI_IS480_QUAKE_400K.yaml
-single_cell false
-reference_genome ../../../data/input/Bacteria/E.coli/K12/MG1655-K12.first400K.fasta.gz
-
-; RL 100
-; IS 480
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/HMP_LANE_1_BH_20111203.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/HMP_LANE_1_BH_20111203.info
deleted file mode 100644
index 0bda0b9..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/HMP_LANE_1_BH_20111203.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads HMP_LANE_1_BH_20111203.yaml
-single_cell true
-
-; RL 100
-; IS 210
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/JCVI_ECOLI.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/JCVI_ECOLI.info
deleted file mode 100644
index 99afcea..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/JCVI_ECOLI.info
+++ /dev/null
@@ -1,7 +0,0 @@
-reads JCVI_ECOLI.yaml
-single_cell true
-reference_genome ../../../data/input/E.coli/MG1655-K12.fasta.gz
-
-; RL 100
-; IS 210
-; jump_is 3000
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/JCVI_ECOLI_400K.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/JCVI_ECOLI_400K.info
deleted file mode 100644
index e45e3e9..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/JCVI_ECOLI_400K.info
+++ /dev/null
@@ -1,7 +0,0 @@
-reads JCVI_ECOLI_400K.yaml
-single_cell true
-reference_genome ../../../data/input/E.coli/MG1655-K12.first400K.fasta.gz
-
-; RL 100
-; IS 210
-; jump_is 3000
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_DELTA_CORRECTED_1.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_DELTA_CORRECTED_1.info
deleted file mode 100644
index 70bdc9c..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_DELTA_CORRECTED_1.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads TRASH_DELTA_CORRECTED_1.yaml
-single_cell true
-
-; RL 100
-; IS 250
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_DELTA_EULER_CORRECTED_1.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_DELTA_EULER_CORRECTED_1.info
deleted file mode 100644
index 3d6fd93..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_DELTA_EULER_CORRECTED_1.info
+++ /dev/null
@@ -1,5 +0,0 @@
-reads TRASH_DELTA_EULER_CORRECTED_1.yaml
-single_cell true
-
-; RL 100
-; IS 250
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_EAS600_EMUL_FULL.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_EAS600_EMUL_FULL.info
deleted file mode 100644
index e52bc85..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_EAS600_EMUL_FULL.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads TRASH_EAS600_EMUL_FULL.yaml
-single_cell false
-reference_genome ../../../data/input/E.coli/MG1655-K12.fasta.gz
-
-; RL 100
-; IS 500
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_JCVI_control_MDA_EColi_0212.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_JCVI_control_MDA_EColi_0212.info
deleted file mode 100644
index a3c0992..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_JCVI_control_MDA_EColi_0212.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads TRASH_JCVI_control_MDA_EColi_0212.yaml
-single_cell true
-reference_genome ../../../data/input/E.coli/MG1655-K12.fasta.gz
-
-; RL 100
-; IS 210
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SAUREUS_CORRECTED_1.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SAUREUS_CORRECTED_1.info
deleted file mode 100644
index d2c2128..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SAUREUS_CORRECTED_1.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads TRASH_SAUREUS_CORRECTED_1.yaml
-single_cell true
-reference_genome ../../../data/input/S.aureus/USA300_FPR3757.fasta.gz
-
-; RL 100
-; IS 220
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SAUREUS_CORRECTED_MS.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SAUREUS_CORRECTED_MS.info
deleted file mode 100644
index 46ee21a..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SAUREUS_CORRECTED_MS.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads TRASH_SAUREUS_CORRECTED_MS.yaml
-single_cell false
-reference_genome ../../../data/input/S.aureus/USA300_FPR3757.fasta.gz
-
-; RL 100
-; IS 180
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SAUREUS_JCVI_0212.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SAUREUS_JCVI_0212.info
deleted file mode 100644
index f8a6eac..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SAUREUS_JCVI_0212.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads TRASH_SAUREUS_JCVI_0212.yaml
-single_cell true
-reference_genome ../../../data/input/S.aureus/USA300_FPR3757.fasta.gz
-
-; RL 100
-; IS 220
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SAUREUS_JCVI_0412_it2.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SAUREUS_JCVI_0412_it2.info
deleted file mode 100644
index ebc380c..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SAUREUS_JCVI_0412_it2.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads TRASH_SAUREUS_JCVI_0412_it2.yaml
-single_cell true
-reference_genome ../../../data/input/S.aureus/USA300_FPR3757.fasta.gz
-
-; RL 100
-; IS 208
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SAUREUS_JCVI_0512_it3.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SAUREUS_JCVI_0512_it3.info
deleted file mode 100644
index f6369d5..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SAUREUS_JCVI_0512_it3.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads TRASH_SAUREUS_JCVI_0512_it3.yaml
-single_cell true
-reference_genome ../../../data/input/S.aureus/USA300_FPR3757.fasta.gz
-
-; RL 100
-; IS 208
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_0310.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_0310.info
deleted file mode 100644
index 458426c..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_0310.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads TRASH_SC_0310.yaml
-single_cell true
-reference_genome ../../../data/input/E.coli/MG1655-K12.fasta.gz
-
-; RL 100
-; IS 270
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_0710.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_0710.info
deleted file mode 100644
index 7e24b56..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_0710.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads TRASH_SC_0710.yaml
-single_cell true
-reference_genome ../../../data/input/E.coli/MG1655-K12.fasta.gz
-
-; RL 100
-; IS 270
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_1110.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_1110.info
deleted file mode 100644
index 3c63972..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_1110.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads TRASH_SC_1110.yaml
-single_cell true
-reference_genome ../../../data/input/E.coli/MG1655-K12.fasta.gz
-
-; RL 100
-; IS 270
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_2909.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_2909.info
deleted file mode 100644
index c8b5ad0..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_2909.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads TRASH_SC_2909.yaml
-single_cell true
-reference_genome ../../../data/input/E.coli/MG1655-K12.fasta.gz
-
-; RL 100
-; IS 270
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_EULER_FULL.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_EULER_FULL.info
deleted file mode 100644
index 15f1b38..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_EULER_FULL.info
+++ /dev/null
@@ -1,6 +0,0 @@
-reads TRASH_SC_EULER_FULL.yaml
-single_cell true
-reference_genome ../../../data/input/E.coli/MG1655-K12.fasta.gz - -; RL 100 -; IS 270 diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_EULER_NEW.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_EULER_NEW.info deleted file mode 100644 index 43f420f..0000000 --- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_EULER_NEW.info +++ /dev/null @@ -1,6 +0,0 @@ -reads TRASH_SC_EULER_NEW.yaml -single_cell true -reference_genome ../../../data/input/E.coli/MG1655-K12.fasta.gz - -; RL 100 -; IS 270 diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_EULER_NEW_FILTERED.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_EULER_NEW_FILTERED.info deleted file mode 100644 index 83d0f15..0000000 --- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_EULER_NEW_FILTERED.info +++ /dev/null @@ -1,6 +0,0 @@ -reads TRASH_SC_EULER_NEW_FILTERED.yaml -single_cell true -reference_genome ../../../data/input/E.coli/MG1655-K12.fasta.gz - -; RL 100 -; IS 270 diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_FULL.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_FULL.info deleted file mode 100644 index c348ad5..0000000 --- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_FULL.info +++ /dev/null @@ -1,6 +0,0 @@ -reads TRASH_SC_FULL.yaml -single_cell true -reference_genome ../../../data/input/E.coli/MG1655-K12.fasta.gz - -; RL 100 -; IS 270 diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_HAMMER_1410_CROPPED.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_HAMMER_1410_CROPPED.info deleted file mode 100644 index 65e4945..0000000 --- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_HAMMER_1410_CROPPED.info +++ /dev/null @@ -1,6 +0,0 @@ -reads TRASH_SC_HAMMER_1410_CROPPED.yaml -single_cell true -reference_genome ../../../data/input/E.coli/MG1655-K12.first1K.fasta.gz - -; RL 100 -; IS 270 diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_K35.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_K35.info deleted file mode 100644 index 0defd7c..0000000 --- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_K35.info +++ /dev/null @@ -1,6 +0,0 @@ -reads TRASH_SC_K35.yaml -single_cell true -reference_genome ../../../data/input/E.coli/MG1655-K12.fasta.gz - -; RL 100 -; IS 270 diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_NEW_1.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_NEW_1.info deleted file mode 100644 index 36763e9..0000000 --- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_NEW_1.info +++ /dev/null @@ -1,6 +0,0 @@ -reads TRASH_SC_NEW_1.yaml -single_cell true -reference_genome ../../../data/input/E.coli/MG1655-K12.fasta.gz - -; RL 100 -; IS 270 diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_NEW_1_FILTERED.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_NEW_1_FILTERED.info deleted file mode 100644 index c0aed5e..0000000 --- 
a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_NEW_1_FILTERED.info +++ /dev/null @@ -1,6 +0,0 @@ -reads TRASH_SC_NEW_1_FILTERED.yaml -single_cell true -reference_genome ../../../data/input/E.coli/MG1655-K12.fasta.gz - -; RL 100 -; IS 270 diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_NEW_2.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_NEW_2.info deleted file mode 100644 index 31da70f..0000000 --- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_NEW_2.info +++ /dev/null @@ -1,6 +0,0 @@ -reads TRASH_SC_NEW_2.yaml -single_cell true -reference_genome ../../../data/input/E.coli/MG1655-K12.fasta.gz - -; RL 100 -; IS 270 diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_NEW_3.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_NEW_3.info deleted file mode 100644 index ec4c251..0000000 --- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/TRASH_SC_NEW_3.info +++ /dev/null @@ -1,6 +0,0 @@ -reads TRASH_SC_NEW_3.yaml -single_cell true -reference_genome ../../../data/input/E.coli/MG1655-K12.fasta.gz - -; RL 100 -; IS 270 diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/diploid_mode.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/diploid_mode.info deleted file mode 100644 index 1044227..0000000 --- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/diploid_mode.info +++ /dev/null @@ -1,16 +0,0 @@ -mode diploid - -simp -{ - post_simplif_enabled false - - ; bulge remover: - br - { - enabled false - } -} - -amb_de { - enabled true -} diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/large_genome_mode.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/large_genome_mode.info deleted file mode 100644 index 128008e..0000000 --- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/large_genome_mode.info +++ /dev/null @@ -1,22 +0,0 @@ -mode large_genome - - -pe { - -debug_output false - -params { - scaffolding_mode old_pe_2015 -} -} - - -bwa_aligner -{ - bwa_enable true - debug false - path_to_bwa ./bin/bwa-spades - min_contig_len 0 -} - - diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/plasmid_mode.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/plasmid_mode.info deleted file mode 100644 index 2cd9a84..0000000 --- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/plasmid_mode.info +++ /dev/null @@ -1,12 +0,0 @@ -mode plasmid - -plasmid -{ - long_edge_length 1000 - edge_length_for_median 10000 - relative_coverage 0.3 - small_component_size 10000 - small_component_relative_coverage 2 - min_component_length 10000 - min_isolated_length 1000 -} diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/rna_mode.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/rna_mode.info deleted file mode 100644 index aae3d6f..0000000 --- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/rna_mode.info +++ /dev/null @@ -1,134 +0,0 @@ -mode rna - -preserve_raw_paired_index true - -contig_output { - scaffolds_name transcripts - ; none --- do not output broken scaffolds | break_gaps --- break only by N steches | break_all --- break all with overlap < k - output_broken_scaffolds none -} - -simp -{ - ; enable advanced ec removal algo - topology_simplif_enabled false - tc - { - ; rctc: tip_cov < rctc * not_tip_cov - ; tc_lb: max_tip_length = max((min(k, 
read_length / 2) * tc_lb), read_length); - condition "{ mmm 3 tc_lb 3.5, cb 100000, rctc 0.1 } { tc_lb 3.5, cb 4, rctc 10000 } { tc_lb 0.1, cb 20, rctc 10000 }" - } - - dead_end - { - condition "{ tc_lb 3.5, cb 2 }" - enabled true - } - - ; bulge remover: - br - { - max_additive_length_coefficient 100 - max_coverage 1000000.0 - max_relative_coverage 100000.0 ; bulge_cov < this * not_bulge_cov - } - - ; erroneous connections remover: - ec - { - ; ec_lb: max_ec_length = k + ec_lb - ; icb: iterative coverage bound - ; to_ec_lb: max_ec_length = 2*tip_length(to_ec_lb) - 1 - ; nbr: use not bulge erroneous connections remover - ; condition "{ ec_lb 9, icb 40.0, nbr }" - condition "{ ec_lb 30, icb 50 }" - } - - ; relative coverage erroneous connections remover: - rcec - { - enabled true - rcec_lb 30 - rcec_cb 0.5 - } - - rcc - { - enabled true - coverage_gap 20. - max_length_coeff 2.0 - max_length_with_tips_coeff 3.0 - max_vertex_cnt 30 - max_ec_length_coefficient 30 - max_coverage_coeff 5.0 - } - ;all topology based erroneous connection removers are off - ier - { - enabled false - max_length 100 - max_coverage 2 - max_length_any_cov 0 ; will be taken max with read_length - } - ; hidden ec remover - her - { - enabled true - uniqueness_length 1500 - unreliability_threshold 0.2 - relative_threshold 5 - } - - init_clean - { - activation_cov -1. - ier - { - enabled false - } - - tip_condition "" - ec_condition "" - } - -} - -; disable filtering in rna mode -de -{ - raw_filter_threshold 0 -} - -pe { -params { - multi_path_extend true - remove_overlaps false - - scaffolding_mode old - - extension_options - { - use_default_single_threshold true - single_threshold 0.05 - } - - scaffolder { - cutoff 1 - hard_cutoff 10 - - cluster_info false - - min_overlap_for_rna_scaffolding 10 - } - - path_cleaning - { - enabled true - min_length 30 - isolated_min_length 50 - min_length_for_low_covered 150 - min_coverage 2 - } - -} -} diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/dipspades/config.info b/src/SPAdes-3.10.1-Linux/share/spades/configs/dipspades/config.info deleted file mode 100644 index 773fdaa..0000000 --- a/src/SPAdes-3.10.1-Linux/share/spades/configs/dipspades/config.info +++ /dev/null @@ -1,64 +0,0 @@ -; base parameters ; -bp { - K 55; - use_multithreading true; - max_threads 16; - max_memory 512; - ; size of buffer for each thread in MB, 0 for autodetection - read_buffer_size 0 -} - -; input-output params ; -io { - haplocontigs haplocontigs - log_filename log.properties - output_base data/debruijn - output_dir simulated_e.coli_100k - tmp_dir dipspades_tmp/ - saves data/debruijn -} - -; run params ; -rp { - entry_point dipspades - : entry_point dipspades:heterozygosity_estimation - ; entry_point dipspades:contig_graph_construction - ; entry_point dipspades:polymorphic_br - ; entry_point dipspades:dipspades - ; entry_point dipspades:consensus_construction - ; entry_point dipspades:haplotype_assembly - developer_mode true -} - -; polymorphic bulge remover config -pbr { - enabled true - rel_bulge_length .8 ; min(len1, len2) / max(len1, len2) >= rel_bulge_length - ; where len1, len2 - lengths of bulge sides - rel_bulge_align .5 ; editdist(seq1, seq2) / min(|seq1|, |seq2|) <= rel_bulge_align - ; where seq1, seq2 - sequences of bulge sides - paired_vert_abs_threshold 50 ; - paired_vert_rel_threshold .15 ; - max_bulge_nucls_len 25000 ; maximal length (in nt number) of bulge sides - max_neigh_number 100 ; maximal number of neighs for bulge search - num_iters_lbr 15 ; number of light bulge remover 
iterations -} - -; consensus constructor config -cc { - enabled true - bulge_len_quantile .95 ; value of this quantile of bulge length histogram - ; is upper bound of bulge length in contigs - tails_lie_on_bulges true ; tail has to lie on bulge side - estimate_tails true - align_bulge_sides true ; check bulge into paired haplocontigs for alignment - min_overlap_size 1500 ; minimal allowable length of overlap (in nt) - min_lcs_size 1500 ; minimal allowable length of shared subsequence of - ; paired contigs (in nt) - max_loop_length 500 ; maximal length of loop that can ignored in remover red contigs -} - -; haplotype_assembly -ha { - ha_enabled true -} diff --git a/src/SPAdes-3.10.1-Linux/share/spades/dipspades_manual.html b/src/SPAdes-3.10.1-Linux/share/spades/dipspades_manual.html deleted file mode 100644 index f480ae0..0000000 --- a/src/SPAdes-3.10.1-Linux/share/spades/dipspades_manual.html +++ /dev/null @@ -1,243 +0,0 @@ - - - dipSPAdes 1.0 Manual - - - - -

dipSPAdes 1.0 manual

- -1. What is dipSPAdes?
-    1.1. dipSPAdes pipeline
-2. Installing dipSPAdes
-3. Running dipSPAdes
-    3.1 dipSPAdes input
-    3.2 dipSPAdes command line options
-         3.2.1 Basic options
-         3.2.2 Input data
-         3.2.3 Advanced options
-         3.2.4 Examples
-         3.2.5 Examples of advanced options usage
-    3.3 dipSPAdes output
-         3.3.1 Haplocontigs alignment output
-         3.3.2 Haplotype assembly output
-4. Citation
-5. Feedback and bug reports
- - -

1. What is dipSPAdes?

-dipSPAdes is a genome assembler based on SPAdes and designed specifically for highly polymorphic diploid genomes.
-It takes advantage of divergence between haplomes in repetitive genome regions to resolve them and construct longer contigs.
-dipSPAdes produces consensus contigs (representing a consensus of both haplomes for the orthologous regions) and performs haplotype assembly.
-Note that dipSPAdes can only benefit from a high polymorphism rate (at least 0.4%).
-For data with a low polymorphism rate, no improvement in N50 over conventional assemblers is expected.

1.1 dipSPAdes pipeline

-The dipSPAdes pipeline consists of three steps:
-    1. Assembly of haplocontigs (contigs representing both haplomes).
-    2. Consensus contigs construction.
-    3. Haplotype assembly.
- - - -

2. Installing dipSPAdes

-dipSPAdes comes as a part of SPAdes assembler package.
-See SPAdes manual for installation instructions.
-Please verify your dipSPAdes installation before running dipSPAdes for the first time:
-
-    <spades installation dir>/dipspades.py --test
-
-
-If the installation is successful, you will find the following information at the end of the log: - -
-
- * Assembled consensus contigs are in: test_dipspades/dipspades/consensus_contigs.fasta
- * Assembled paired consensus contigs are in: test_dipspades/dipspades/paired_consensus_contigs.fasta
- * Assembled unpaired consensus contigs are in: test_dipspades/dipspades/unpaired_consensus_contigs.fasta
- * Alignment of haplocontigs is in: test_dipspades/dipspades/haplocontigs_alignment
- * Haplotype assembly is in: test_dipspades/dipspades/haplotype_assembly.out
- * Possibly conservative regions are in: test_dipspades/dipspades/possibly_conservative_regions.fasta
-
-Thank you for using SPAdes!
-
-======= dipSPAdes finished.
-dipSPAdes log can be found here: test_dipspades/dipspades/dipspades.log
-
-
- - -

3. Running dipSPAdes

- - -

3.1 dipSPAdes input

-dipSPAdes can take as input one of the following three alternatives:
    -
  • Reads. dipSPAdes takes them in the same format as described in SPAdes manual. In this case dipSPAdes runs SPAdes to obtain haplocontigs as the first step "Assembly of haplocontigs".
  • -
  • Haplocontigs. dipSPAdes can use user-provided haplocontigs (for example computed with another assembler). In this case dipSPAdes skips the first step and starts from the second step "Consensus contigs construction". -
  • Reads and haplocontigs. dipSPAdes can also use both reads and haplocontigs. In this case dipSPAdes first computes haplocontigs from reads and then uses a mixture of the computed haplocontigs and the user-provided haplocontigs as input for the further steps.
  • -
-We provide example command lines for each of these scenarios in the Examples section.

3.2 dipSPAdes command line options

-To run dipSPAdes from the command line, type
-
-
-dipspades.py [options] -o <output_dir>
-
-

-Note that we assume that SPAdes installation directory is added to the PATH variable (provide full path to dipSPAdes executable otherwise: <spades installation dir>/dipspades.py). - - -

3.2.1 Basic options

-

- -o <output_dir>
-     Specifies the output directory. Required option. -

-

- --test
-     Runs dipSPAdes on the toy data set; see section 2. -

-

- -h (or --help)
-     Prints help. -

- -

- -v (or --version)
-     Prints version. -

- - -

3.2.2 Input data

-To specify input reads, use the SPAdes options described in the SPAdes manual.

- --hap <file_name>
-     Specifies file with haplocontigs in FASTA format. Note that dipSPAdes can use any number of haplocontig files. -
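-
- For example (a sketch with hypothetical file names), a run combining three haplocontig files could look like:
-
-    dipspades.py --hap haplocontigs_a.fasta --hap haplocontigs_b.fasta --hap haplocontigs_c.fasta -o output_dir
-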

- - -

3.2.3 Advanced options

-

- --expect-gaps
-     Indicates a significant amount of expected gaps in genome coverage (e.g. for datasets with relatively low coverage). -

-

- --expect-rearrangements
-     Indicates an extreme heterozygosity rate in haplomes (e.g. haplomes that differ by long insertions/deletions). -

-

- --hap-assembly
-     Enables haplotype assembly phase that results in files haplotype_assembly.out, conservative_regions.fasta, and possibly_conservative_regions.fasta (see Haplotype assembly output). -

- - -

3.2.4 Examples

-To perform assembly (construct consensus contigs and perform haplotype assembly) of a diploid genome from paired-end reads (reads_left.fastq and reads_right.fastq) run: -
-
-dipspades.py -1 reads_left.fastq -2 reads_right.fastq -o output_dir
-
-

- -To perform assembly (construct consensus contigs and perform haplotype assembly) of a diploid genome from previously computed haplocontigs (haplocontigs1.fasta and haplocontigs2.fasta) run: -
-
-dipspades.py --hap haplocontigs1.fasta --hap haplocontigs2.fasta -o output_dir
-
-

- -To perform assembly of a diploid genome from both reads (reads_left.fastq and reads_right.fastq) and previously computed haplocontigs (haplocontigs.fasta) run: -
-
-dipspades.py -1 reads_left.fastq -2 reads_right.fastq --hap haplocontigs.fasta -o output_dir
-
-

- - -

3.2.5 Examples of advanced options usage

-To perform assembly of a diploid genome with additional options run: -
-
-dipspades.py -1 reads_left.fastq -2 reads_right.fastq --expect-gaps -o output_dir
-
-

- -To relaunch steps 2 and 3 of dipSPAdes (see dipSPAdes pipeline section) with a different set of advanced options, reuse the haplocontigs constructed in the previous run (see dipSPAdes output section) and run: -
-
-dipspades.py --hap output_dir/haplocontigs.fasta --expect-gaps --expect-rearrangements --hap-assembly -o new_output_dir
-
-

- - -

3.3 dipSPAdes output

-dipSPAdes produces the following output:
-
    -
  • haplocontigs.fasta - file in FASTA format with computed haplocontigs (if input reads were provided).
  • -
  • consensus_contigs.fasta - file in FASTA format with a set of constructed consensus contigs
  • -
  • paired_consensus_contigs.fasta - file in FASTA format with a subset of consensus contigs that have a polymorphism detected on them.
  • -
  • unpaired_consensus_contigs.fasta - file in FASTA format with a subset of consensus contigs that have no polymorphism detected on them. These contigs are potentially redundant.
  • -
  • haplocontigs_alignment.out - file with recorded haplocontigs that correspond to homologous regions on haplomes.
  • -
  • haplotype_assembly.out - result of haplotype assembly
  • -
  • conservative_regions.fasta - file in FASTA format with conservative regions of diploid genome
  • -
  • possibly_conservative_regions.fasta - file in FASTA format with unresolved regions of haplocontigs that may be either conservative or repetitive.
  • -
- - -

3.3.1 Haplocontigs alignment output

-File haplocontigs_alignment.out consists of blocks of the following structure:
-
-
-Consensus contig: CONSENSUS_CONTIG_NAME
-    Overlapping haplocontigs:
-        HAPLOCONTIG_NAME_1 HAPLOCONTIG_NAME_2
-                         ...
-    Nested haplocontigs:
-        HAPLOCONTIG_NAME_3 HAPLOCONTIG_NAME_4
-                        ...
-
-
-Each block corresponds to the alignment of haplocontigs to the consensus contig CONSENSUS_CONTIG_NAME.
-The name of the consensus contig, CONSENSUS_CONTIG_NAME, coincides with the name in the file consensus_contigs.fasta.
-It is followed by a list of pairs of haplocontig names.
-Haplocontigs in each pair at least partially correspond either to the same positions on the same haplome or to homologous positions on different haplomes.
-The list is divided into two subblocks: Overlapping haplocontigs and Nested haplocontigs.
-Overlapping haplocontigs contains pairs of haplocontigs such that the suffix of the first haplocontig corresponds to the prefix of the second one.
-Nested haplocontigs contains pairs of haplocontigs such that a certain subcontig of the second contig corresponds to the entire first contig.

3.3.2 Haplotype assembly output

-File haplotype_assembly.out consists of lines of the following structure:
-
-
-HAPLOCONTIG_NAME_1	HAPLOCONTIG_NAME_2
-
-
-where HAPLOCONTIG_NAME_1 and HAPLOCONTIG_NAME_2 are names of homologous haplocontigs that correspond to different haplomes and at least partially correspond to homologous positions in different chromosomes.
-The names correspond to the names of haplocontigs specified as input using the --hap option or computed at the first step.

4. Citation

-

- If you use dipSPAdes in your research, please include Safonova, Bankevich, and Pevzner, 2014 in your reference list. -

- In addition, we would like to list your publications that use our software on our website. Please email the reference, the name of your lab, department and institution to spades.support@cab.spbu.ru. -
- - -

5. Feedback and bug reports

-Your comments, bug reports, and suggestions are very welcome.
-If you have trouble running dipSPAdes, please provide us with the files params.txt and dipspades.log from the directory <output_dir>.
-Address for communications: spades.support@cab.spbu.ru.
-
-
diff --git a/src/SPAdes-3.10.1-Linux/share/spades/manual.html b/src/SPAdes-3.10.1-Linux/share/spades/manual.html
deleted file mode 100644
index e94fbe6..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/manual.html
+++ /dev/null
@@ -1,1223 +0,0 @@
-
-
-SPAdes 3.10.1 Manual
-
-
-
-

SPAdes 3.10.1 Manual

- -1. About SPAdes
-    1.1. Supported data types
-    1.2. SPAdes pipeline
-    1.3. SPAdes performance
-2. Installation
-    2.1. Downloading SPAdes Linux binaries
-    2.2. Downloading SPAdes binaries for Mac
-    2.3. Downloading and compiling SPAdes source code
-    2.4. Verifying your installation
-3. Running SPAdes
-    3.1. SPAdes input
-    3.2. SPAdes command line options
-    3.3. Assembling IonTorrent reads
-    3.4. Assembling long Illumina paired reads (2x150 and 2x250)
-    3.5. SPAdes output
-    3.6. plasmidSPAdes output
-    3.7. Assembly evaluation
-4. Citation
-5. Feedback and bug reports
-
- - -

1. About SPAdes

-

- SPAdes – St. Petersburg genome assembler – is an assembly toolkit containing various assembly pipelines. This manual will help you to install and run SPAdes. -SPAdes version 3.10.1 was released under GPLv2 on March 1, 2017 and can be downloaded from http://cab.spbu.ru/software/spades/. - - -

1.1 Supported data types

-

- The current version of SPAdes works with Illumina or IonTorrent reads and is capable of providing hybrid assemblies using PacBio, Oxford Nanopore and Sanger reads. You can also provide additional contigs that will be used as long reads. -

- Version 3.10.1 of SPAdes supports paired-end reads, mate-pairs and unpaired reads. SPAdes can take as input several paired-end and mate-pair libraries simultaneously. Note that SPAdes was initially designed for small genomes: it was tested on bacterial (both single-cell MDA and standard isolates), fungal and other small genomes. SPAdes is not intended for larger genomes (e.g. mammalian-size genomes); for such purposes you can try it at your own risk. -

- SPAdes 3.10.1 includes the following additional pipelines: -

    -
  • dipSPAdes – a module for assembling highly polymorphic diploid genomes (see dipSPAdes manual).
  • -
  • metaSPAdes – a pipeline for metagenomic data sets (see metaSPAdes options).
  • -
  • plasmidSPAdes – a pipeline for extracting and assembling plasmids from WGS data sets (see plasmidSPAdes options).
  • -
  • rnaSPAdes – a de novo transcriptome assembler from RNA-Seq data (see rnaSPAdes manual).
  • -
  • truSPAdes – a module for TruSeq barcode assembly (see truSPAdes manual).
  • -
- - -

1.2 SPAdes pipeline

-

-SPAdes comes in several separate modules: -

    -
  • BayesHammer – read error correction tool for Illumina reads, which works well on both single-cell and standard data sets.
  • -
  • IonHammer – read error correction tool for IonTorrent data, which also works on both types of data.
  • -
  • SPAdes – iterative short-read genome assembly module; values of K are selected automatically based on the read length and data set type.
  • -
  • MismatchCorrector – a tool which improves mismatch and short indel rates in resulting contigs and scaffolds; this module uses the BWA tool [Li H. and Durbin R., 2009]; MismatchCorrector is turned off by default, but we recommend turning it on (see SPAdes options section).
  • -
-

- We recommend running SPAdes with BayesHammer/IonHammer to obtain high-quality assemblies. However, if you use your own read correction tool, it is possible to turn the error correction module off. It is also possible to use only the read error correction stage if you wish to use another assembler. See the SPAdes options section.

1.3 SPAdes performance

-

- In this section we give approximate data about SPAdes' performance on two data sets: an E. coli isolate data set and an E. coli single-cell (MDA) data set. -

-

- We ran SPAdes with default parameters using 16 threads on a server with Intel Xeon 2.27GHz processors and an SSD hard drive. BayesHammer runs in approximately half an hour and takes up to 8Gb of RAM to perform read error correction on each data set. Assembly takes about 10 minutes for the E. coli isolate data set and 20 minutes for the E. coli single-cell data set. Both data sets require about 8Gb of RAM (see notes below). MismatchCorrector runs for about 15 minutes on both data sets, and requires less than 2Gb of RAM. All modules also require additional disk space for storing results (corrected reads, contigs, etc) and temporary files. See the table below for more precise values.

-
-                            E. coli isolate                      E. coli single-cell
-    Stage                   Time   Peak RAM    Additional       Time   Peak RAM    Additional
-                                   usage (Gb)  disk space (Gb)         usage (Gb)  disk space (Gb)
-    BayesHammer             29m    7.1         11               34m    7.6         8.8
-    SPAdes                  11m    8.4         1.6              17m    8           3.0
-    MismatchCorrector       13m    1.8         27.1             16m    1.8         25.5
-    Whole pipeline          53m    8.4         29.6             1h 7m  8           28.3
-

- Notes: -

    -
  • Running SPAdes without preliminary read error correction (e.g. without BayesHammer or IonHammer) will likely require more time and memory.
  • -
  • Each module removes its temporary files as soon as it finishes.
  • -
  • SPAdes uses 512 Mb per thread for buffers, which results in higher memory consumption. If you set memory limit manually, SPAdes will use smaller buffers and thus less RAM.
  • -
  • Performance statistics are given for SPAdes version 3.10.1.
  • -
- - - -

2. Installation

-

- - SPAdes requires a 64-bit Linux system or Mac OS and Python (supported versions are 2.4, 2.5, 2.6, 2.7, 3.2, 3.3, 3.4 and 3.5) to be pre-installed on it. To obtain SPAdes you can either download binaries or download source code and compile it yourself. - - -

2.1 Downloading SPAdes Linux binaries

- -

- To download SPAdes Linux binaries and extract them, go to the directory in which you wish SPAdes to be installed and run: - -

-
-    wget http://cab.spbu.ru/files/release3.10.1/SPAdes-3.10.1-Linux.tar.gz
-    tar -xzf SPAdes-3.10.1-Linux.tar.gz
-    cd SPAdes-3.10.1-Linux/bin/
-
-
- -

- In this case you do not need to run any installation scripts – SPAdes is ready to use. The following files will be placed in the bin directory: -

    -
  • spades.py (main executable script)
  • -
  • dipspades.py (main executable script for dipSPAdes)
  • -
  • metaspades.py (main executable script for metaSPAdes)
  • -
  • plasmidspades.py (main executable script for plasmidSPAdes)
  • -
  • rnaspades.py (main executable script for rnaSPAdes)
  • -
  • truspades.py (main executable script for truSPAdes)
  • -
  • hammer (read error correcting module for Illumina reads)
  • -
  • ionhammer (read error correcting module for IonTorrent reads)
  • -
  • spades (assembly module)
  • -
  • bwa-spades (BWA alignment module which is required for mismatch correction)
  • -
  • corrector (mismatch correction module)
  • -
  • dipspades (assembly module for highly polymorphic diploid genomes)
  • -
  • scaffold_correction (executable used in truSPAdes pipeline)
  • -
- -

- We also suggest adding SPAdes installation directory to the PATH variable. - - -
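-
- For example (a sketch; adjust the path to wherever you extracted SPAdes), the directory can be added for the current shell session with:
-
-    export PATH=/path/to/SPAdes-3.10.1-Linux/bin:$PATH
-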

2.2 Downloading SPAdes binaries for Mac

- -

- To obtain SPAdes binaries for Mac, go to the directory in which you wish SPAdes to be installed and run: - -

-
-    curl http://cab.spbu.ru/files/release3.10.1/SPAdes-3.10.1-Darwin.tar.gz -o SPAdes-3.10.1-Darwin.tar.gz
-    tar -zxf SPAdes-3.10.1-Darwin.tar.gz
-    cd SPAdes-3.10.1-Darwin/bin/
-
-
- -

- Just as in Linux, SPAdes is ready to use and no further installation steps are required. You will get the same files in the bin directory: -

    -
  • spades.py (main executable script)
  • -
  • dipspades.py (main executable script for dipSPAdes)
  • -
  • metaspades.py (main executable script for metaSPAdes)
  • -
  • plasmidspades.py (main executable script for plasmidSPAdes)
  • -
  • rnaspades.py (main executable script for rnaSPAdes)
  • -
  • truspades.py (main executable script for truSPAdes)
  • -
  • hammer (read error correcting module for Illumina reads)
  • -
  • ionhammer (read error correcting module for IonTorrent reads)
  • -
  • spades (assembly module)
  • -
  • bwa-spades (BWA alignment module which is required for mismatch correction)
  • -
  • corrector (mismatch correction module)
  • -
  • dipspades (assembly module for highly polymorphic diploid genomes)
  • -
  • scaffold_correction (executable used in truSPAdes pipeline)
  • -
- -

- We also suggest adding SPAdes installation directory to the PATH variable. - - - -

2.3 Downloading and compiling SPAdes source code

-

- If you wish to compile SPAdes by yourself you will need the following libraries to be pre-installed: -

    -
  • g++ (version 4.8.2 or higher)
  • -
  • cmake (version 2.8.12 or higher)
  • -
  • zlib
  • -
  • libbz2
  • -
- -

- If you meet these requirements, you can download the SPAdes source code: - -

-
-    wget http://cab.spbu.ru/files/release3.10.1/SPAdes-3.10.1.tar.gz
-    tar -xzf SPAdes-3.10.1.tar.gz
-    cd SPAdes-3.10.1
-
-
- -

- and build it with the following script: - -

-
-    ./spades_compile.sh
-
-
- -

- SPAdes will be built in the directory ./bin. If you wish to install SPAdes into another directory, you can specify full path of destination folder by running the following command in bash or sh: - -

-
-    PREFIX=<destination_dir> ./spades_compile.sh
-
-
- -

- for example: - -

-
-    PREFIX=/usr/local ./spades_compile.sh
-
-
- -

- which will install SPAdes into /usr/local/bin. - -

- After installation you will get the same files in ./bin (or <destination_dir>/bin if you specified PREFIX) directory: -

    -
  • spades.py (main executable script)
  • -
  • dipspades.py (main executable script for dipSPAdes)
  • -
  • metaspades.py (main executable script for metaSPAdes)
  • -
  • plasmidspades.py (main executable script for plasmidSPAdes)
  • -
  • rnaspades.py (main executable script for rnaSPAdes)
  • -
  • truspades.py (main executable script for truSPAdes)
  • -
  • hammer (read error correcting module for Illumina reads)
  • -
  • ionhammer (read error correcting module for IonTorrent reads)
  • -
  • spades (assembly module)
  • -
  • bwa-spades (BWA alignment module which is required for mismatch correction)
  • -
  • corrector (mismatch correction module)
  • -
  • dipspades (assembly module for highly polymorphic diploid genomes)
  • -
  • scaffold_correction (executable used in truSPAdes pipeline)
  • -
- -

- We also suggest adding SPAdes installation directory to the PATH variable. - - -

2.4 Verifying your installation

-

- For testing purposes, SPAdes comes with a toy data set (reads that align to the first 1000 bp of E. coli). To try SPAdes on this data set, run:

-
-    <spades installation dir>/spades.py --test
-
-
- -

- If you added SPAdes installation directory to the PATH variable, you can run: - -

-
-    spades.py --test
-
-
- - For simplicity, we further assume that the SPAdes installation directory has been added to the PATH variable. - - -

- If the installation is successful, you will find the following information at the end of the log: - -

-
-===== Assembling finished. Used k-mer sizes: 21, 33, 55
-
- * Corrected reads are in spades_test/corrected/
- * Assembled contigs are in spades_test/contigs.fasta
- * Assembled scaffolds are in spades_test/scaffolds.fasta
- * Assembly graph is in spades_test/assembly_graph.fastg
- * Assembly graph in GFA format is in spades_test/assembly_graph.gfa
- * Paths in the assembly graph corresponding to the contigs are in spades_test/contigs.paths
- * Paths in the assembly graph corresponding to the scaffolds are in spades_test/scaffolds.paths
-
-======= SPAdes pipeline finished.
-
-========= TEST PASSED CORRECTLY.
-
-SPAdes log can be found here: spades_test/spades.log
-
-Thank you for using SPAdes!
-
-
- - -

3. Running SPAdes

- - -

3.1 SPAdes input

-

- SPAdes takes as input paired-end reads, mate-pairs and single (unpaired) reads in FASTA and FASTQ. For IonTorrent data SPAdes also supports unpaired reads in unmapped BAM format (like the one produced by Torrent Server). However, in order to run read error correction, reads should be in FASTQ or BAM format. Sanger, Oxford Nanopore and PacBio CLR reads can be provided in both formats since SPAdes does not run error correction for these types of data. - -

- To run SPAdes 3.10.1 you need at least one library of the following types: -

    -
  • Illumina paired-end/high-quality mate-pairs/unpaired reads
  • -
  • IonTorrent paired-end/high-quality mate-pairs/unpaired reads
  • -
  • PacBio CCS reads
  • -
-

-Illumina and IonTorrent libraries should not be assembled together. All other types of input data are compatible. SPAdes should not be used if only PacBio CLR, Oxford Nanopore, Sanger reads or additional contigs are available. - -

-SPAdes supports mate-pair only assembly. However, we recommend using only high-quality mate-pair libraries in this case (e.g. ones that do not have a paired-end part). We tested the mate-pair only pipeline using Illumina Nextera mate-pairs; a sketch of such a run follows.
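-
- As a minimal sketch (hypothetical file names; the --hqmp options are described below in this section):
-
-    spades.py --hqmp1-1 hq_mp_left.fastq --hqmp1-2 hq_mp_right.fastq -o spades_output
-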

- The current version of SPAdes also supports Lucigen NxSeq® Long Mate Pair libraries, which always have forward-reverse orientation. If you wish to use Lucigen NxSeq® Long Mate Pair reads, you will need the Python regex library to be pre-installed on your machine. You can install it with the Python pip installer:

-
-    pip install regex
-
-
- or with the Easy Install Python module: -
-
-    easy_install regex
-
-
- -

Notes: -

    -
  • It is not recommended to run SPAdes on PacBio reads with low coverage (less than 5).
  • -
  • We suggest not to run SPAdes on PacBio reads for large genomes.
  • -
  • SPAdes accepts gzip-compressed files.
  • -
- -

Read-pair libraries

-

- Using the command line interface, you can specify up to nine different paired-end libraries, up to nine mate-pair libraries and also up to nine high-quality mate-pair ones. If you wish to use more, you can use a YAML data set file. We further refer to paired-end and mate-pair libraries simply as read-pair libraries.

- By default, SPAdes assumes that paired-end and high-quality mate-pair reads have forward-reverse (fr) orientation and usual mate-pairs have reverse-forward (rf) orientation. However, different orientations can be set for any library by using SPAdes options. - - -

- To distinguish reads in pairs we refer to them as left and right reads. For forward-reverse orientation, the forward reads correspond to the left reads and the reverse reads, to the right. Similarly, in reverse-forward orientation left and right reads correspond to reverse and forward reads, respectively, etc. - -

- Each read-pair library can be stored in several files or several pairs of files. Paired reads can be organized in two different ways: - -

    -
  • In file pairs. In this case left and right reads are placed in different files and go in the same order in respective files.
  • -
  • In merged files. In this case, the reads are interlaced, so that each right read goes after the corresponding paired left read.
  • -
- -

- For example, Illumina produces paired-end reads in two files: s_1_1_sequence.txt and s_1_2_sequence.txt. If you choose to store reads in file pairs, make sure that for every read from s_1_1_sequence.txt the corresponding paired read from s_1_2_sequence.txt is placed in the respective paired file on the same line number. If you choose to use merged files, every read from s_1_1_sequence.txt should be followed by the corresponding paired read from s_1_2_sequence.txt. - - -
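-
- As an illustrative sketch (hypothetical read names), the same two read pairs organized both ways:
-
-    s_1_1_sequence.txt: @read1/1  @read2/1            (left reads, in order)
-    s_1_2_sequence.txt: @read1/2  @read2/2            (right reads, in the same order)
-    merged file:        @read1/1  @read1/2  @read2/1  @read2/2
-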

Unpaired (single-read) libraries

-

- Using the command line interface, you can specify up to nine different single-read libraries. To input more libraries, you can use a YAML data set file.

- Single-read libraries are assumed to have high quality and a reasonable coverage. For example, you can provide PacBio CCS reads as a single-read library. Additionally, if you have merged a paired-end library with overlapping read-pairs (for example, using FLASh), you can provide the resulting reads as a single-read library, as sketched below.

- Note that you should not specify PacBio CLR reads, Sanger reads or additional contigs as single-read libraries, since each of them has a separate option.
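-
- As a minimal sketch of the FLASh approach mentioned above (hypothetical file names; FLASh writes merged reads to <prefix>.extendedFrags.fastq by default):
-
-    flash lib1_1.fastq lib1_2.fastq -o lib1
-    spades.py --s1 lib1.extendedFrags.fastq -o spades_output
-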

PacBio and Oxford Nanopore reads

- -

- SPAdes can take as an input an unlimited number of PacBio and Oxford Nanopore libraries. - -

- PacBio CLR and Oxford Nanopore reads are used for hybrid assemblies (e.g. with Illumina or IonTorrent). There is no need to pre-correct this kind of data. SPAdes will use PacBio CLR and Oxford Nanopore reads for gap closure and repeat resolution. - -

- For PacBio you just need to have filtered subreads in FASTQ/FASTA format. Provide these filtered subreads using --pacbio option. Oxford Nanopore reads are provided with --nanopore option. - -

- PacBio CCS/Reads of Insert reads or pre-corrected (using third-party software) PacBio CLR / Oxford Nanopore reads can be simply provided as single reads to SPAdes. - -
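-
- For example (hypothetical file names), a hybrid assembly combining an Illumina paired-end library with PacBio CLR and Oxford Nanopore reads:
-
-    spades.py --pe1-1 ill_left.fastq --pe1-2 ill_right.fastq \
-    --pacbio pb_clr.fastq --nanopore ont_reads.fastq \
-    -o spades_output
-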

Additional contigs

-

- In case you have contigs of the same genome generated by other assembler(s) and you wish to merge them into the SPAdes assembly, you can specify additional contigs using --trusted-contigs or --untrusted-contigs. The first option is used when high-quality contigs are available. These contigs will be used for graph construction, gap closure and repeat resolution. The second option is used for less reliable contigs that may have more errors or contigs of unknown quality. These contigs will be used only for gap closure and repeat resolution. The number of additional contigs is unlimited.

- Note that SPAdes does not perform assembly using genomes of closely related species. Only contigs of the same genome should be specified.
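-
- For example (hypothetical file names), supplementing a paired-end assembly with both kinds of additional contigs:
-
-    spades.py --pe1-1 left.fastq --pe1-2 right.fastq \
-    --trusted-contigs reliable_contigs.fasta \
-    --untrusted-contigs draft_contigs.fasta \
-    -o spades_output
-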

- - -

3.2 SPAdes command line options

-

- To run SPAdes from the command line, type - -

-
-    spades.py [options] -o <output_dir>
-
-
-Note that we assume that SPAdes installation directory is added to the PATH variable (provide full path to SPAdes executable otherwise: <spades installation dir>/spades.py). - - -

Basic options

-

- -o <output_dir>
-     Specify the output directory. Required option. -

- - -

- --sc
-     This flag is required for MDA (single-cell) data. -

- - -

- --meta   (same as metaspades.py)
-     This flag is recommended when assembling metagenomic data sets (runs metaSPAdes, see paper for more details). Currently metaSPAdes supports only a single library, which has to be paired-end (we hope to remove this restriction soon). It does not support careful mode (mismatch correction is not available). In addition, you cannot specify a coverage cutoff for metaSPAdes. Note that metaSPAdes might be very sensitive to the presence of technical sequences remaining in the data (most notably adapter read-throughs), so please run quality control and pre-process your data accordingly. -

- - -

- --plasmid   (same as plasmidspades.py)
-     This flag is required when assembling only plasmids from WGS data sets (runs plasmidSPAdes, see paper for the algorithm details). Note that plasmidSPAdes is not compatible with metaSPAdes and single-cell mode. Additionally, we do not recommend running plasmidSPAdes on more than one library. - See section 3.6 for plasmidSPAdes output details. -

- - - -

- --rna   (same as rnaspades.py)
-     This flag should be used when assembling RNA-Seq data sets (runs rnaSPAdes). To learn more, see rnaSPAdes manual. -
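-
- Each of these pipeline flags has an equivalent dedicated script, as noted above; for example (hypothetical file names):
-
-    metaspades.py --pe1-1 left.fastq --pe1-2 right.fastq -o metaspades_output
-    plasmidspades.py --pe1-1 left.fastq --pe1-2 right.fastq -o plasmidspades_output
-    rnaspades.py --pe1-1 left.fastq --pe1-2 right.fastq -o rnaspades_output
-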

- -

- --iontorrent
-     This flag is required when assembling IonTorrent data. Allows BAM files as input. Carefully read section 3.3 before using this option. -

- - -

- --test
-     Runs SPAdes on the toy data set; see section 2.4. -

- -

- -h (or --help)
-     Prints help. -

- -

- -v (or --version)
-     Prints SPAdes version. -

- - - -

Pipeline options

- -

- --only-error-correction
-     Performs read error correction only. -

- -

- --only-assembler
-     Runs assembly module only. -

- -
-

- --careful
-     Tries to reduce the number of mismatches and short indels. Also runs MismatchCorrector – a post-processing tool which uses the BWA tool (comes with SPAdes). This option is recommended only for assembly of small genomes. We strongly recommend not using it for large and medium-size eukaryotic genomes. Note that this option is not supported by metaSPAdes and rnaSPAdes. -

- -

- --continue
-     Continues SPAdes run from the specified output folder starting from the last available check-point. Check-points are made after: -

    -
  • error correction module is finished
  • -
  • iteration for each specified K value of assembly module is finished
  • -
  • mismatch correction is finished for contigs or scaffolds
  • -
-For example, if the specified K values are 21, 33 and 55 and SPAdes was stopped or crashed during the assembly stage with K = 55, you can run SPAdes with the --continue option specifying the same output directory. SPAdes will continue the run starting from the assembly stage with K = 55. The error correction module and the iterations for K equal to 21 and 33 will not be run again.
-Note that all options except -o <output_dir> are ignored if --continue is set.
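-
- For example (assuming the interrupted run used spades_output as its output directory), resuming is simply:
-
-    spades.py --continue -o spades_output
-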

- -

- --restart-from <check_point>
-     Restart SPAdes run from the specified output folder starting from the specified check-point. Check-points are: -

    -
  • ec – start from error correction
  • -
  • as – restart assembly module from the first iteration
  • -
  • k<int> – restart from the iteration with specified k values, e.g. k55
  • -
  • mc – restart mismatch correction
  • -
-In comparison to the --continue option, you can change some of the options when using --restart-from. You can change any option except: all basic options, all options for specifying input data (including --dataset), the --only-error-correction option and the --only-assembler option. For example, if you ran the assembler with k values 21,33,55 without mismatch correction, you can add one more iteration with k=77 and run the mismatch correction step by running SPAdes with the following options:
- --restart-from k55 -k 21,33,55,77 --mismatch-correction -o <previous_output_dir>. -
Since all files will be overwritten, do not forget to copy your assembly from the previous run if you need it. -

- -

- --disable-gzip-output
-     Forces the read error correction module not to compress the corrected reads. If this option is not set, corrected reads will be in *.fastq.gz format. -

- - - -

Input data

- -  Specifying one library (previously used format) -

- --12 <file_name>
-     File with interlaced forward and reverse paired-end reads. -

- -

- -1 <file_name>
-     File with forward reads. -

- -

- -2 <file_name>
-     File with reverse reads. -

- -

- -s <file_name>
-     File with unpaired reads. -

- -  Specifying multiple libraries (new format)
-
    - -
  •  Single-read libraries
  • - -

    - --s<#> <file_name>
    -     File for single-read library number <#> (<#> = 1,2,..,9). For example, for the first single-read library the option is: - --s1 <file_name>
    -     Do not use the -s option for single-read libraries, since it specifies unpaired reads for the first paired-end library. -

    - - -
  •  Paired-end libraries
  • - -

    - --pe<#>-12 <file_name>
    -     File with interlaced reads for paired-end library number <#> (<#> = 1,2,..,9). For example, for the first paired-end library the option is: - --pe1-12 <file_name>
    -

    - -

    - --pe<#>-1 <file_name>
    -     File with left reads for paired-end library number <#> (<#> = 1,2,..,9). -

    - -

    - --pe<#>-2 <file_name>
    -     File with right reads for paired-end library number <#> (<#> = 1,2,..,9). -

    - -

    - --pe<#>-s <file_name>
    -     File with unpaired reads from paired-end library number <#> (<#> = 1,2,..,9)
    -     For example, paired reads can become unpaired during the error correction procedure. -

    - -

    - --pe<#>-<or>
    -     Orientation of reads for paired-end library number <#> (<#> = 1,2,..,9; <or> = "fr","rf","ff").
    -     The default orientation for paired-end libraries is forward-reverse. For example, to specify reverse-forward orientation for the second paired-end library, you should use the flag: - --pe2-rf
    -

    - -
  •  Mate-pair libraries
  • -

    - --mp<#>-12 <file_name>
    -     File with interlaced reads for mate-pair library number <#> (<#> = 1,2,..,9). -

    - -

    - --mp<#>-1 <file_name>
    -     File with left reads for mate-pair library number <#> (<#> = 1,2,..,9). -

    - -

    - --mp<#>-2 <file_name>
    -     File with right reads for mate-pair library number <#> (<#> = 1,2,..,9). -

    -

    - --mp<#>-<or>
    -     Orientation of reads for mate-pair library number <#> (<#> = 1,2,..,9; <or> = "fr","rf","ff").
    -     The default orientation for mate-pair libraries is reverse-forward. For example, to specify forward-forward orientation for the first mate-pair library, you should use the flag: - --mp1-ff
    -

    - - -
  •  High-quality mate-pair libraries (can be used for mate-pair only assembly)
  • - -

    - --hqmp<#>-12 <file_name>
    -     File with interlaced reads for high-quality mate-pair library number <#> (<#> = 1,2,..,9). -

    - -

    - --hqmp<#>-1 <file_name>
    -     File with left reads for high-quality mate-pair library number <#> (<#> = 1,2,..,9). -

    - -

    - --hqmp<#>-2 <file_name>
    -     File with right reads for high-quality mate-pair library number <#> (<#> = 1,2,..,9). -

    -

    - --hqmp<#>-s <file_name>
    -     File with unpaired reads from high-quality mate-pair library number <#> (<#> = 1,2,..,9)
    -

    - -

    - --hqmp<#>-<or>
    -     Orientation of reads for high-quality mate-pair library number <#> (<#> = 1,2,..,9; <or> = "fr","rf","ff").
    -     The default orientation for high-quality mate-pair libraries is forward-reverse. For example, to specify reverse-forward orientation for the first high-quality mate-pair library, you should use the flag: - --hqmp1-rf
    -

    - - - - -
  •  Lucigen NxSeq® Long Mate Pair libraries (see section 3.1 for details)
  • - -

    - --nxmate<#>-1 <file_name>
    -     File with left reads for Lucigen NxSeq® Long Mate Pair library number <#> (<#> = 1,2,..,9). -

    - -

    - --nxmate<#>-2 <file_name>
    -     File with right reads for Lucigen NxSeq® Long Mate Pair library number <#> (<#> = 1,2,..,9). -

    - -

    -
- - -  Specifying data for hybrid assembly - -

- --pacbio <file_name>
-     File with PacBio CLR reads. For PacBio CCS reads use -s option. More information on PacBio reads is provided in section 3.1. -

- - -

- --nanopore <file_name>
-     File with Oxford Nanopore reads. -

- - -

- --sanger <file_name>
-     File with Sanger reads -

- -

- --trusted-contigs <file_name>
-     Reliable contigs of the same genome, which are likely to have no misassemblies and a small rate of other errors (e.g. mismatches and indels). This option is not intended for contigs of related species. -

- -

- --untrusted-contigs <file_name>
-     Contigs of the same genome whose quality is average or unknown. Contigs of poor quality can be used but may introduce errors in the assembly. This option is also not intended for contigs of related species. -

- - -  Specifying input data with YAML data set file (advanced) - -

- An alternative way to specify an input data set for SPAdes is to create a YAML data set file.
-By using a YAML file you can provide an unlimited number of paired-end, mate-pair and unpaired libraries.
-Basically, a YAML data set file is a text file in which input libraries are provided as a comma-separated list in square brackets.
-Each library is provided in braces as a comma-separated list of attributes.
-The following attributes are available:

    -
  • orientation ("fr", "rf", "ff")
  • -
  • type ("paired-end", "mate-pairs", "hq-mate-pairs", "single", "pacbio", "nanopore", "sanger", "trusted-contigs", "untrusted-contigs")
  • -
  • interlaced reads (comma-separated list of files with interlaced reads)
  • -
  • left reads (comma-separated list of files with left reads)
  • -
  • right reads (comma-separated list of files with right reads)
  • -
  • single reads (comma-separated list of files with single reads)
  • -
- -

- To properly specify a library you should provide its type and at least one file with reads. -Orientation is an optional attribute. Its default value is "fr" (forward-reverse) for paired-end libraries and -"rf" (reverse-forward) for mate-pair libraries. - -

- The value for each attribute is given after a colon. -Comma-separated lists of files should be given in square brackets. -For each file you should provide its full path in double quotes. -Make sure that files with right reads are given in the same order as corresponding files with left reads. - -

- For example, if you have one paired-end library split into two pairs of files:

-
-    lib_pe1_left_1.fastq
-    lib_pe1_right_1.fastq
-    lib_pe1_left_2.fastq
-    lib_pe1_right_2.fastq
-
-
- -one mate-pair library: - -
-
-    lib_mp1_left.fastq
-    lib_mp1_right.fastq
-
-
-and PacBio CCS and CLR reads: - -
-
-    pacbio_ccs.fastq
-    pacbio_clr.fastq
-
-
- - YAML file should look like this: - -
-
-    [
-      {
-        orientation: "fr",
-        type: "paired-end",
-        right reads: [
-          "/FULL_PATH_TO_DATASET/lib_pe1_right_1.fastq",
-          "/FULL_PATH_TO_DATASET/lib_pe1_right_2.fastq" 
-        ],
-        left reads: [
-          "/FULL_PATH_TO_DATASET/lib_pe1_left_1.fastq",
-          "/FULL_PATH_TO_DATASET/lib_pe1_left_2.fastq" 
-        ]
-      },
-      {
-        orientation: "rf",
-        type: "mate-pairs",
-        right reads: [
-          "/FULL_PATH_TO_DATASET/lib_mp1_right.fastq" 
-        ],
-        left reads: [
-          "/FULL_PATH_TO_DATASET/lib_mp1_left.fastq"
-        ]
-      },
-      {
-        type: "single",
-        single reads: [
-          "/FULL_PATH_TO_DATASET/pacbio_ccs.fastq" 
-        ]
-      },
-      {
-        type: "pacbio",
-        single reads: [
-          "/FULL_PATH_TO_DATASET/pacbio_clr.fastq" 
-        ]
-      }
-    ]
-
-
- - -

- Once you have created a YAML file, save it with the .yaml extension (e.g. as my_data_set.yaml) and run SPAdes using the --dataset option (see the example after the notes below):
- --dataset <your YAML file>

- - Notes: -

    -
  • The --dataset option cannot be used with any other options for specifying input data.
  • -
  • We recommend nesting all files with long reads of the same data type in a single library block.
  • -
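-
- For example, with the YAML file above saved as my_data_set.yaml, the whole run reduces to:
-
-    spades.py --dataset my_data_set.yaml -o spades_output
-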
- - - -

Advanced options

- -

- -t <int> (or --threads <int>)
-     Number of threads. The default value is 16. -

- -

- -m <int> (or --memory <int>)
-     Set the memory limit in Gb. SPAdes terminates if it reaches this limit. The default value is 250 Gb. The actual amount of consumed RAM will be below this limit. Make sure this value is correct for the given machine. SPAdes uses the limit value to automatically determine the sizes of various buffers, etc. -

- -

- --tmp-dir <dir_name>
-     Set directory for temporary files from read error correction. The default value is <output_dir>/corrected/tmp -

- -

- -k <int,int,...>
-     Comma-separated list of k-mer sizes to be used (all values must be odd, less than 128 and listed in ascending order). If --sc is set, the default values are 21,33,55. For multicell data sets, K values are selected automatically based on the maximum read length (see the note on assembling long Illumina paired reads for details). To properly select K values for IonTorrent data, read section 3.3. -

- -

- --cov-cutoff <float>
-     Read coverage cutoff value. Must be a positive float value, or 'auto', or 'off'. The default value is 'off'. When set to 'auto', SPAdes automatically computes the coverage threshold using a conservative strategy. Note that this option is not supported by metaSPAdes. -

- - -

- --phred-offset <33 or 64>
-     PHRED quality offset for the input reads, can be either 33 or 64. It will be auto-detected if it is not specified. -
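-
- For example (hypothetical file names), an assembly restricted to 8 threads and 32 Gb of RAM, with explicit k-mer sizes and an automatic coverage cutoff, could be run as:
-
-    spades.py --pe1-1 left.fastq --pe1-2 right.fastq \
-    -t 8 -m 32 -k 21,33,55,77 --cov-cutoff auto \
-    -o spades_output
-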

- - - - - -

Examples

-

- To test the toy data set, you can also run the following command from the SPAdes bin directory: - -

-
-    spades.py --pe1-1 ../share/spades/test_dataset/ecoli_1K_1.fq.gz \
-    --pe1-2 ../share/spades/test_dataset/ecoli_1K_2.fq.gz -o spades_test
-
-
-

- If you have your library separated into several pairs of files, for example:

-
-    lib1_forward_1.fastq
-    lib1_reverse_1.fastq
-    lib1_forward_2.fastq
-    lib1_reverse_2.fastq
-

-

- make sure that the corresponding files are given in the same order:

-
-    spades.py --pe1-1 lib1_forward_1.fastq --pe1-2 lib1_reverse_1.fastq \
-    --pe1-1 lib1_forward_2.fastq --pe1-2 lib1_reverse_2.fastq \
-    -o spades_output
-

-

- Files with interlaced paired-end reads, or files with unpaired reads, can be specified in any order with one file per option, for example:

-
-    spades.py --pe1-12 lib1_1.fastq --pe1-12 lib1_2.fastq \
-    --pe1-s lib1_unpaired_1.fastq --pe1-s lib1_unpaired_2.fastq \
-    -o spades_output
-

-

- If you have several paired-end and mate-pair libraries, for example:

  • paired-end library 1

    -
    -    lib_pe1_left.fastq
    -    lib_pe1_right.fastq
    -

  • mate-pair library 1

    -
    -    lib_mp1_left.fastq
    -    lib_mp1_right.fastq
    -

  • mate-pair library 2

    -
    -    lib_mp2_left.fastq
    -    lib_mp2_right.fastq
    -

    - make sure that the files corresponding to each library are grouped together:

    -
    -    spades.py --pe1-1 lib_pe1_left.fastq --pe1-2 lib_pe1_right.fastq \
    -    --mp1-1 lib_mp1_left.fastq --mp1-2 lib_mp1_right.fastq \
    -    --mp2-1 lib_mp2_left.fastq --mp2-2 lib_mp2_right.fastq \
    -    -o spades_output
    -

    -

    - If you have IonTorrent unpaired reads, PacBio CLR reads and additional reliable contigs:

    -
    -    it_reads.fastq
    -    pacbio_clr.fastq
    -    contigs.fasta
    -

    -

    - run SPAdes with the following command:

    -
    -    spades.py --iontorrent -s it_reads.fastq \
    -    --pacbio pacbio_clr.fastq --trusted-contigs contigs.fasta \
    -    -o spades_output
    -

    -

    - If a single-read library is split into several files:

    -
    -    unpaired1_1.fastq
    -    unpaired1_2.fastq
    -    unpaired1_3.fastq
    -

    -

    - specify them as one library:

    -
    -    spades.py --s1 unpaired1_1.fastq \
    -    --s1 unpaired1_2.fastq --s1 unpaired1_3.fastq \
    -    -o spades_output
    -

    -

    - All options for specifying input data can be mixed if needed, but make sure that the files for each library are grouped together and that the files with left and right paired reads are listed in the same order.

    3.3 Assembling IonTorrent reads

    -

    - Only FASTQ or BAM files are supported as input.

    - The selection of the k-mer length is non-trivial for IonTorrent. If the data set is more or less conventional (good coverage, not high GC, etc.), then use our recommendation for long reads (e.g. assemble using k-mer lengths 21,33,55,77,99,127). However, due to the increased error rate, some changes of k-mer lengths (e.g. selection of shorter ones) may be required. For example, if you ran SPAdes with k-mer lengths 21,33,55,77 and then decided to assemble the same data set using more iterations and larger values of K, you can run SPAdes once again specifying the same output folder and the following options: --restart-from k77 -k 21,33,55,77,99,127 --mismatch-correction -o <previous_output_dir>. Do not forget to copy contigs and scaffolds from the previous run. We plan to address the selection of k-mer lengths for IonTorrent reads in future versions.
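    - In full, such a restart would look like this (the output directory placeholder refers to the folder of the previous run):

    -
    -    spades.py --restart-from k77 -k 21,33,55,77,99,127 \
    -    --mismatch-correction -o <previous_output_dir>
    -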

    Error correction may not be needed at all for data produced with the Hi-Q enzyme. However, we suggest trying to assemble your data both with and without error correction and selecting the better variant.

    For non-trivial data sets (e.g. with high GC content, or low or uneven coverage) we suggest enabling single-cell mode (the --sc option) and using k-mer lengths of 21,33,55.

    3.4 Assembling long Illumina paired reads (2x150 and 2x250)

    -

    - Recent advances in DNA sequencing technology have led to a rapid increase in read length. Nowadays, it is common to have a data set consisting of 2x150 or 2x250 paired-end reads produced by an Illumina MiSeq or HiSeq2500. However, the use of longer reads alone will not automatically improve assembly quality. An assembler that can properly take advantage of them is needed.

    - SPAdes' use of iterative k-mer lengths allows it to benefit from the full potential of long paired-end reads. Currently the assembler options have to be set manually, but we plan to incorporate automatic calculation of the necessary options soon.

    - Please note that in addition to the read length, the insert length also matters a lot. It is not recommended to sequence a 300bp fragment with a pair of 250bp reads. We suggest using 350-500 bp fragments with 2x150 reads and 550-700 bp fragments with 2x250 reads.

    Multi-cell data set with read length 2x150

    -

    - Do not turn off SPAdes error correction (the BayesHammer module), which is included in the default SPAdes pipeline.

    - If you have enough coverage (50x+), you may want to try k-mer lengths of 21, 33, 55, 77 (selected by default for reads of length 150bp).

    - Make sure you run the assembler with the --careful option to minimize the number of mismatches in the final contigs.

    - We recommend checking the SPAdes log file at the end of each iteration to monitor the average coverage of the contigs.

    - For reads corrected prior to running the assembler:

    -
    -    spades.py -k 21,33,55,77 --careful --only-assembler <your reads> -o spades_output
    -
    -
    -

    - To correct and assemble the reads:

    -
    -    spades.py -k 21,33,55,77 --careful <your reads> -o spades_output
    -
    -
    -

    Multi-cell data set with read lengths 2x250

    -

    - Do not turn off SPAdes error correction (the BayesHammer module), which is included in the default SPAdes pipeline.

    - By default we suggest increasing k-mer lengths in increments of 22 until the k-mer length reaches 127. The exact k-mer length depends on the coverage: a k-mer length of 127 corresponds to 50x k-mer coverage and higher. For a read length of 250bp, SPAdes automatically chooses K values of 21, 33, 55, 77, 99, 127.

    - Make sure you run the assembler with the --careful option to minimize the number of mismatches in the final contigs.

    - We recommend checking the SPAdes log file at the end of each iteration to monitor the average coverage of the contigs.

    - For reads corrected prior to running the assembler:

    -
    -    spades.py -k 21,33,55,77,99,127 --careful --only-assembler <your reads> -o spades_output
    -
    -
    -

    - To correct and assemble the reads:

    -
    -    spades.py -k 21,33,55,77,99,127 --careful <your reads> -o spades_output
    -
    -
    -

    Single-cell data set with read lengths 2 x 150 or 2 x 250

    -

    - The default k-mer lengths are recommended. For single-cell data sets, SPAdes selects k-mer sizes of 21, 33 and 55; an illustrative command is shown below.

    - However, it might be tricky to fully utilize the advantages of the long reads you have. Consider contacting us for more information and to discuss an assembly strategy.
    -
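    - For example, a single-cell run with the default k-mer sizes might look like this (a sketch; the read file names are placeholders):

    -
    -    spades.py --sc --careful -1 sc_left.fastq -2 sc_right.fastq \
    -    -o spades_sc_output
    -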

    3.5 SPAdes output

    -

    - SPAdes stores all output files in <output_dir>, which is set by the user.

    -

    - Contig/scaffold names in SPAdes output FASTA files have the following format:
    >NODE_3_length_237403_cov_243.207_ID_45
    Here 3 is the number of the contig/scaffold, 237403 is the sequence length in nucleotides and 243.207 is the k-mer coverage for the last (largest) k value used. Note that the k-mer coverage is always lower than the read (per-base) coverage.
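    - As a quick sanity check, these headers can be listed directly from the FASTA file; a minimal sketch, assuming the output layout described in this section:

    -
    -    grep '^>' <output_dir>/scaffolds.fasta | head -n 5
    -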

    - In general, SPAdes uses two techniques for joining contigs into scaffolds. The first one relies on read pairs and tries to estimate the size of the gap separating the contigs. The second one relies on the assembly graph: e.g. if two contigs are separated by a complex tandem repeat that cannot be resolved exactly, the contigs are joined into a scaffold with a fixed gap size of 100 bp. Contigs produced by SPAdes do not contain N symbols.

    - To view FASTG and GFA files we recommend using the Bandage visualization tool. Note that the sequences stored in assembly_graph.fastg correspond to contigs before repeat resolution (edges of the assembly graph). Paths corresponding to contigs after repeat resolution (scaffolding) are stored in contigs.paths (scaffolds.paths) in the format accepted by Bandage (see the Bandage wiki for details). An example is given below.

    Let the contig with the name NODE_5_length_100000_cov_215.651_ID_5 consist of the following edges of the assembly graph:

    -    
    -    >EDGE_2_length_33280_cov_199.702
    -    >EDGE_5_length_84_cov_321.414'
    -    >EDGE_3_length_111_cov_175.304
    -    >EDGE_5_length_84_cov_321.414'
    -    >EDGE_4_length_66661_cov_223.548
    -    
    -
    -

    - Then contigs.paths will contain the following record:

    -    
    -    NODE_5_length_100000_cov_215.651_ID_5
    -    2+,5-,3+,5-,4+
    -    
    -
    - -

    - Since the current version of Bandage does not accept paths with gaps, paths corresponding to contigs/scaffolds that jump over a gap in the assembly graph are split by a semicolon at the gap positions. For example, the following record

    -    
    -    NODE_3_length_237403_cov_243.207_ID_45
    -    21-,17-,15+,17-,16+;
    -    31+,23-,22+,23-,4-
    -    
    -
    - states that NODE_3_length_237403_cov_243.207_ID_45 corresponds to a path with 10 edges, but jumps over a gap between edges EDGE_16_length_21503_cov_482.709 and EDGE_31_length_140767_cov_220.239.

    - The full list of the <output_dir> content is presented below:

    -
    -    scaffolds.fasta         resulting scaffolds (recommended for use as resulting sequences)
    -    contigs.fasta           resulting contigs
    -    assembly_graph.fastg    assembly graph
    -    contigs.paths           contigs paths in the assembly graph
    -    scaffolds.paths         scaffolds paths in the assembly graph
    -    before_rr.fasta         contigs before repeat resolution
    -
    -    corrected/              files from read error correction
    -        configs/            configuration files for read error correction
    -        corrected.yaml      internal configuration file
    -        output files with corrected reads
    -
    -    params.txt              information about SPAdes parameters in this run
    -    spades.log              SPAdes log
    -    dataset.info            internal configuration file
    -    input_dataset.yaml      internal YAML data set file
    -    K<##>/                  directory containing intermediate files from the run with K=<##>; these files should not be used as assembly results (use the resulting contigs/scaffolds in the files mentioned above)
    -
    - -

    - SPAdes will overwrite these files and directories if they exist in the specified <output_dir>.

    3.6 plasmidSPAdes output

    -

    - plasmidSPAdes outputs only DNA sequences from putative plasmids. Output file names and formats remain the same as in SPAdes (see the previous section), with the following difference: for all contig names in contigs.fasta, scaffolds.fasta and assembly_graph.fastg we append the suffix _component_X, where X is the id of the putative plasmid to which the contig belongs. Note that plasmidSPAdes may not be able to separate similar plasmids, and thus their contigs may appear with the same id.
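    - plasmidSPAdes is launched with the --plasmid flag (or via the plasmidspades.py wrapper); an illustrative command with placeholder read files:

    -
    -    spades.py --plasmid -1 left.fastq -2 right.fastq -o plasmid_output
    -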

    3.7 Assembly evaluation

    -

    - QUAST may be used to generate summary statistics (N50, maximum contig length, GC %, # genes found in a reference list or with built-in gene finding tools, etc.) for a single assembly. It may also be used to compare statistics for multiple assemblies of the same data set (e.g., SPAdes runs with different parameters, or several different assemblers).
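    - A minimal sketch of such a comparison, assuming QUAST is installed and on the PATH (the assembly paths are placeholders):

    -
    -    quast.py spades_run1/contigs.fasta spades_run2/contigs.fasta \
    -    -o quast_results
    -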

    4. Citation

    -

    - If you use SPAdes in your research, please include Nurk, Bankevich et al., 2013 in your reference list. You may also cite Bankevich, Nurk et al., 2012 instead.

    - If you use PacBio or Nanopore reads, you may also cite Antipov et al., 2015. If you use multiple paired-end and/or mate-pair libraries, you may also cite the papers describing the SPAdes repeat resolution algorithms: Prjibelski et al., 2014 and Vasilinetc et al., 2015. If you use plasmidSPAdes, please cite Antipov et al., 2016.

    - For information about the dipSPAdes and truSPAdes papers, see the dipSPAdes manual and the truSPAdes manual, respectively.

    - In addition, we would like to list publications that use our software on our website. Please email the reference, the name of your lab, department and institution to spades.support@cab.spbu.ru.

    5. Feedback and bug reports

    -

    - Your comments, bug reports, and suggestions are very welcome. They will help us further improve SPAdes.

    - If you have any trouble running SPAdes, please send us params.txt and spades.log from the directory <output_dir>.

    - Address for communications: spades.support@cab.spbu.ru.




diff --git a/src/SPAdes-3.10.1-Linux/share/spades/rnaspades_manual.html b/src/SPAdes-3.10.1-Linux/share/spades/rnaspades_manual.html
deleted file mode 100644
index 5a23b1e..0000000
--- a/src/SPAdes-3.10.1-Linux/share/spades/rnaspades_manual.html
+++ /dev/null
@@ -1,93 +0,0 @@

    rnaSPAdes manual

    -
    -1. About rnaSPAdes
    -2. rnaSPAdes specifics
    -    2.1. Running rnaSPAdes
    -    2.2. rnaSPAdes output
    -3. Assembly evaluation
    -4. Citation
    -5. Feedback and bug reports
    -

    1 About rnaSPAdes

    -

    rnaSPAdes is a tool for de novo transcriptome assembly from RNA-Seq data and is suitable for all kinds of organisms. rnaSPAdes has been part of the SPAdes package since version 3.9. Information about SPAdes download, requirements, installation and basic options can be found in the SPAdes manual. Below you may find information about the differences between SPAdes and rnaSPAdes.

    2 rnaSPAdes specifics

    -

    2.1 Running rnaSPAdes

    -

    - To run rnaSPAdes, use

    -
    -    rnaspades.py [options] -o <output_dir>
    -
    -
    - or
    -
    -    spades.py --rna [options] -o <output_dir>
    -
    -
    - Note that we assume that the SPAdes installation directory has been added to the PATH variable (otherwise, provide the full path to the rnaSPAdes executable: <rnaspades installation dir>/rnaspades.py).

    Here are several notes regarding the options:

    • rnaSPAdes can take as input only one paired-end library and multiple single-end libraries (see the illustrative command after this list).
    • rnaSPAdes does not support the --careful and --cov-cutoff options.
    • rnaSPAdes is not compatible with other pipeline options such as --meta, --sc and --plasmid.
    • rnaSPAdes works using only a single k-mer size (55 by default). We strongly recommend not changing this parameter. In case your RNA-Seq data set contains long Illumina reads (150 bp and longer), you may try a longer k-mer size (approximately half of the read length). If you have any doubts about your run, do not hesitate to contact us using the e-mail given below.
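    - For instance, a run combining one paired-end library with an additional single-end library might look like this (a sketch with placeholder file names):

    -
    -    rnaspades.py --pe1-1 rna_left.fastq --pe1-2 rna_right.fastq \
    -    --s1 rna_single.fastq -o rnaspades_output
    -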

    2.2 rnaSPAdes output

    -

    - rnaSPAdes outputs only one FASTA file, named transcripts.fasta. The corresponding file with paths in the assembly_graph.fastg is transcripts.paths.

    - Transcript names in rnaSPAdes output FASTA files have the following format:
    >NODE_97_length_6237_cov_11.9819_g8_i2
    Similarly to SPAdes, 97 is the number of the transcript, 6237 is its sequence length in nucleotides and 11.9819 is the k-mer coverage. Note that the k-mer coverage is always lower than the read (per-base) coverage. g8_i2 corresponds to gene number 8 and isoform number 2 within this gene. Transcripts with the same gene number are presumably derived from the same or somewhat similar (e.g. paralogous) genes. Note that the prediction is based on the presence of shared sequences in the transcripts and is very approximate.
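    - A quick way to count the assembled transcripts, or to inspect these headers, is to filter the FASTA file directly (a minimal sketch):

    -
    -    grep -c '^>' <output_dir>/transcripts.fasta
    -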

    3 Assembly evaluation

    -

    - rnaQUAST may be used for transcriptome assembly quality assessment for model organisms when a reference genome and gene database are available. rnaQUAST also includes the BUSCO and GeneMarkS-T tools for de novo evaluation.

    4 Citation

    -

    - If you use rnaSPAdes in your research, please include the main SPAdes paper (Bankevich, Nurk et al., 2012) in your reference list. A paper on rnaSPAdes is to be submitted.

    5 Feedback and bug reports

    -

    - Your comments, bug reports, and suggestions are very welcome. They will help us further improve rnaSPAdes.

    - If you have any trouble running rnaSPAdes, please send us params.txt and spades.log from the directory <output_dir>.

    - Address for communications: spades.support@cab.spbu.ru.




    - - - diff --git a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/corrector_logic.py b/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/corrector_logic.py deleted file mode 100644 index 7459c5f..0000000 --- a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/corrector_logic.py +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/python -O - -############################################################################ -# Copyright (c) 2015 Saint Petersburg State University -# Copyright (c) 2011-2014 Saint Petersburg Academic University -# All Rights Reserved -# See file LICENSE for details. -############################################################################ - - -import os -import sys -import shutil -import support -import process_cfg -from site import addsitedir -from distutils import dir_util - - - -def prepare_config_corr(filename, cfg, ext_python_modules_home): - addsitedir(ext_python_modules_home) - if sys.version.startswith('2.'): - import pyyaml2 as pyyaml - elif sys.version.startswith('3.'): - import pyyaml3 as pyyaml - data = pyyaml.load(open(filename, 'r')) - data["dataset"] = cfg.dataset - data["output_dir"] = cfg.output_dir - data["work_dir"] = process_cfg.process_spaces(cfg.tmp_dir) - #data["hard_memory_limit"] = cfg.max_memory - data["max_nthreads"] = cfg.max_threads - data["bwa"] = cfg.bwa - file_c = open(filename, 'w') - pyyaml.dump(data, file_c, default_flow_style = False, default_style='"', width=100500) - file_c.close() - - - -def run_corrector(configs_dir, execution_home, cfg, - ext_python_modules_home, log, to_correct, result): - addsitedir(ext_python_modules_home) - if sys.version.startswith('2.'): - import pyyaml2 as pyyaml - elif sys.version.startswith('3.'): - import pyyaml3 as pyyaml - - dst_configs = os.path.join(cfg.output_dir, "configs") - if os.path.exists(dst_configs): - shutil.rmtree(dst_configs) - dir_util.copy_tree(os.path.join(configs_dir, "corrector"), dst_configs, preserve_times=False) - cfg_file_name = os.path.join(dst_configs, "corrector.info") - - cfg.tmp_dir = support.get_tmp_dir(prefix="corrector_") - - prepare_config_corr(cfg_file_name, cfg, ext_python_modules_home) - binary_name = "corrector" - - command = [os.path.join(execution_home, binary_name), - os.path.abspath(cfg_file_name), os.path.abspath(to_correct)] - - log.info("\n== Running contig polishing tool: " + ' '.join(command) + "\n") - - - log.info("\n== Dataset description file was created: " + cfg_file_name + "\n") - - support.sys_call(command, log) - if not os.path.isfile(result): - support.error("Mismatch correction finished abnormally: " + result + " not found!") - if os.path.isdir(cfg.tmp_dir): - shutil.rmtree(cfg.tmp_dir) - - - - - diff --git a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/dipspades_logic.py b/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/dipspades_logic.py deleted file mode 100644 index b85ea95..0000000 --- a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/dipspades_logic.py +++ /dev/null @@ -1,276 +0,0 @@ -#!/usr/bin/env python - -############################################################################ -# Copyright (c) 2015 Saint Petersburg State University -# Copyright (c) 2011-2014 Saint Petersburg Academic University -# All Rights Reserved -# See file LICENSE for details. 
-############################################################################ - - -import sys -import getopt -import os -import logging -import shutil -import errno -import options_storage -import support -import process_cfg -from distutils import dir_util -from os.path import abspath, expanduser - - -class DS_Args_List: - long_options = "expect-gaps expect-rearrangements hap= threads= memory= tmp-dir= dsdebug hap-assembly dsK= saves= start-from=".split() - short_options = "o:t:m:" - - -class DS_Args: - max_threads = options_storage.THREADS - max_memory = options_storage.MEMORY - tmp_dir = None - allow_gaps = False - weak_align = False - haplocontigs_fnames = [] - output_dir = "" - haplocontigs = "" - dev_mode = False - haplotype_assembly = False - k = 55 - saves = "" - start_from = "dipspades" - - -def print_ds_args(ds_args, log): - log.info("dipSPAdes parameters:") - log.info("\tK value for dipSPAdes: " + str(ds_args.k)) - log.info("\tExpect gaps: " + str(ds_args.allow_gaps)) - log.info("\tExpect rearrangements: " + str(ds_args.weak_align)) - log.info("\tFiles with haplocontigs : " + str(ds_args.haplocontigs_fnames)) - log.info("\tHaplotype assembly stage: " + str(ds_args.haplotype_assembly)) - log.info("\tOutput directory: " + str(ds_args.output_dir)) - log.info("") - log.info("\tDir for temp files: " + str(ds_args.tmp_dir)) - log.info("\tThreads: " + str(ds_args.max_threads)) - log.info("\tMemory limit (in Gb): " + str(ds_args.max_memory)) - - -# src_config_dir - path of dipspades configs -def copy_configs(src_config_dir, dst_config_dir): - if os.path.exists(dst_config_dir): - shutil.rmtree(dst_config_dir) - dir_util.copy_tree(src_config_dir, dst_config_dir, preserve_times=False) - - -def prepare_configs(src_config_dir, ds_args, log): - config_dir = os.path.join(ds_args.output_dir, "dipspades_configs") - copy_configs(src_config_dir, config_dir) - #log.info("dipSPAdes configs were copied to " + config_dir) - config_fname = os.path.join(config_dir, "config.info") - return os.path.abspath(config_fname) - - -def write_haplocontigs_in_file(filename, haplocontigs): - hapfile = open(filename, 'w') - for hapcontig in haplocontigs: - hapfile.write(hapcontig + "\n") - hapfile.close() - -def ParseStartPoint(start_point_arg, log): - if start_point_arg == 'pbr': - return 'dipspades:polymorphic_br' - elif start_point_arg == 'kmg': - return 'dipspades:kmer_gluer' - elif start_point_arg == 'cc': - return 'dipspades:consensus_construction' - elif start_point_arg == 'ha': - return 'dipspades:haplotype_assembly' - log.info("ERROR: Start point " + start_point_arg + " was undefined") - sys.exit(1) - -def parse_arguments(argv, log): - try: - options, not_options = getopt.gnu_getopt(argv, DS_Args_List.short_options, DS_Args_List.long_options) - except getopt.GetoptError: - _, exc, _ = sys.exc_info() - sys.stderr.write(str(exc) + "\n") - sys.stderr.flush() - options_storage.usage("", dipspades=True) - sys.exit(1) - - ds_args = DS_Args() - for opt, arg in options: - if opt == '-o': - ds_args.output_dir = abspath(expanduser(arg)) - elif opt == '--expect-gaps': - ds_args.allow_gaps = True - elif opt == '--expect-rearrangements': - ds_args.weak_align = True - elif opt == '--hap': - ds_args.haplocontigs_fnames.append(support.check_file_existence(arg, 'haplocontigs', log, dipspades=True)) - elif opt == '-t' or opt == "--threads": - ds_args.max_threads = int(arg) - elif opt == '-m' or opt == "--memory": - ds_args.max_memory = int(arg) - elif opt == '--tmp-dir': - ds_args.tmp_dir = abspath(expanduser(arg)) - elif opt 
== '--dsdebug': - ds_args.dev_mode = True - elif opt == '--hap-assembly': - ds_args.haplotype_assembly = True - elif opt == '--dsK': - ds_args.k = int(arg) - elif opt == '--saves': - ds_args.saves = os.path.abspath(arg) - ds_args.dev_mode = True - elif opt == '--start-from': - ds_args.start_from = ParseStartPoint(arg, log) - ds_args.dev_mode = True - ds_args.haplocontigs = os.path.join(ds_args.output_dir, "haplocontigs") - - if not ds_args.output_dir: - support.error("the output_dir is not set! It is a mandatory parameter (-o output_dir).", log, dipspades=True) - if not ds_args.haplocontigs_fnames and ds_args.start_from == 'dipspades': - support.error("cannot start dipSPAdes without at least one haplocontigs file!", log, dipspades=True) - if not ds_args.tmp_dir: - ds_args.tmp_dir = os.path.join(ds_args.output_dir, options_storage.TMP_DIR) - - if ds_args.start_from != 'dipspades' and ds_args.saves == '': - support.error("saves were not defined! dipSPAdes can not start from " + ds_args.start_from) - - return ds_args - - -def prepare_config(config_fname, ds_args, log): - args_dict = dict() - args_dict["tails_lie_on_bulges"] = process_cfg.bool_to_str(not ds_args.allow_gaps) - args_dict["align_bulge_sides"] = process_cfg.bool_to_str(not ds_args.weak_align) - args_dict["haplocontigs"] = process_cfg.process_spaces(ds_args.haplocontigs) - args_dict["output_dir"] = process_cfg.process_spaces(ds_args.output_dir) - args_dict["developer_mode"] = process_cfg.bool_to_str(ds_args.dev_mode) - args_dict["tmp_dir"] = process_cfg.process_spaces(ds_args.tmp_dir) - args_dict["max_threads"] = ds_args.max_threads - args_dict["max_memory"] = ds_args.max_memory - args_dict["output_base"] = "" - args_dict["ha_enabled"] = process_cfg.bool_to_str(ds_args.haplotype_assembly) - args_dict["K"] = str(ds_args.k) - args_dict['saves'] = ds_args.saves - args_dict['entry_point'] = ds_args.start_from - process_cfg.substitute_params(config_fname, args_dict, log) - - -def print_ds_output(output_dir, log): - consensus_file = os.path.join(output_dir, "consensus_contigs.fasta") - if os.path.exists(consensus_file): - log.info(" * Assembled consensus contigs are in: " + support.process_spaces(consensus_file)) - - paired_consensus_file = os.path.join(output_dir, "paired_consensus_contigs.fasta") - if os.path.exists(paired_consensus_file): - log.info(" * Assembled paired consensus contigs are in: " + support.process_spaces(paired_consensus_file)) - - unpaired_consensus_file = os.path.join(output_dir, "unpaired_consensus_contigs.fasta") - if os.path.exists(unpaired_consensus_file): - log.info(" * Assembled unpaired consensus contigs are in: " + support.process_spaces(unpaired_consensus_file)) - - hapalignment_file = os.path.join(output_dir, "haplocontigs_alignent") - if os.path.exists(hapalignment_file): - log.info(" * Alignment of haplocontigs is in: " + support.process_spaces(hapalignment_file)) - - haplotype_assembly_file = os.path.join(output_dir, "haplotype_assembly.out") - if os.path.exists(haplotype_assembly_file): - log.info(" * Results of haplotype assembly are in: " + support.process_spaces(haplotype_assembly_file)) - - consregions_file = os.path.join(output_dir, "conservative_regions.fasta") - if os.path.exists(consregions_file): - log.info(" * Conservative regions are in: " + support.process_spaces(consregions_file)) - - possconsregions_file = os.path.join(output_dir, "possibly_conservative_regions.fasta") - if os.path.exists(possconsregions_file): - log.info(" * Possibly conservative regions are in: " + 
support.process_spaces(possconsregions_file)) - - -def main(ds_args_list, general_args_list, spades_home, bin_home): - log = logging.getLogger('dipspades') - log.setLevel(logging.DEBUG) - console = logging.StreamHandler(sys.stdout) - console.setFormatter(logging.Formatter('%(message)s')) - console.setLevel(logging.DEBUG) - log.addHandler(console) - - support.check_binaries(bin_home, log) - ds_args = parse_arguments(ds_args_list, log) - - if not os.path.exists(ds_args.output_dir): - os.makedirs(ds_args.output_dir) - log_filename = os.path.join(ds_args.output_dir, "dipspades.log") - if os.path.exists(log_filename): - os.remove(log_filename) - log_handler = logging.FileHandler(log_filename, mode='a') - log.addHandler(log_handler) - - params_filename = os.path.join(ds_args.output_dir, "params.txt") - params_handler = logging.FileHandler(params_filename, mode='a') - log.addHandler(params_handler) - - log.info("\n") - log.info("General command line: " + " ".join(general_args_list) + "\n") - log.info("dipSPAdes command line: " + " ".join(ds_args_list) + "\n") - print_ds_args(ds_args, log) - log.removeHandler(params_handler) - - log.info("\n======= dipSPAdes started. Log can be found here: " + log_filename + "\n") - write_haplocontigs_in_file(ds_args.haplocontigs, ds_args.haplocontigs_fnames) - - config_fname = prepare_configs(os.path.join(spades_home, "configs", "dipspades"), ds_args, log) - ds_args.tmp_dir = support.get_tmp_dir(prefix="dipspades_", base_dir=ds_args.tmp_dir) - prepare_config(config_fname, ds_args, log) - - try: - log.info("===== Assembling started.\n") - binary_path = os.path.join(bin_home, "dipspades") - command = [binary_path, config_fname] - support.sys_call(command, log) - log.info("\n===== Assembling finished.\n") - print_ds_output(ds_args.output_dir, log) - if os.path.isdir(ds_args.tmp_dir): - shutil.rmtree(ds_args.tmp_dir) - log.info("\n======= dipSPAdes finished.\n") - log.info("dipSPAdes log can be found here: " + log_filename + "\n") - log.info("Thank you for using dipSPAdes!") - log.removeHandler(log_handler) - except Exception: - exc_type, exc_value, _ = sys.exc_info() - if exc_type == SystemExit: - sys.exit(exc_value) - else: - if exc_type == OSError and exc_value.errno == errno.ENOEXEC: # Exec format error - support.error("It looks like you are using SPAdes binaries for another platform.\n" + - support.get_spades_binaries_info_message(), dipspades=True) - else: - log.exception(exc_value) - support.error("exception caught: %s" % exc_type, log) - except BaseException: # since python 2.5 system-exiting exceptions (e.g. KeyboardInterrupt) are derived from BaseException - exc_type, exc_value, _ = sys.exc_info() - if exc_type == SystemExit: - sys.exit(exc_value) - else: - log.exception(exc_value) - support.error("exception caught: %s" % exc_type, log, dipspades=True) - - -if __name__ == '__main__': - self_dir_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) - spades_init_candidate1 = os.path.join(self_dir_path, "../../spades_init.py") - spades_init_candidate2 = os.path.join(self_dir_path, "../../../bin/spades_init.py") - if os.path.isfile(spades_init_candidate1): - sys.path.append(os.path.dirname(spades_init_candidate1)) - elif os.path.isfile(spades_init_candidate2): - sys.path.append(os.path.dirname(spades_init_candidate2)) - else: - sys.stderr.write("Cannot find spades_init.py! 
Aborting..\n") - sys.stderr.flush() - sys.exit(1) - import spades_init - spades_init.init() - main(sys.argv, "", spades_init.spades_home, spades_init.bin_home) diff --git a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/hammer_logic.py b/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/hammer_logic.py deleted file mode 100644 index 1d971b8..0000000 --- a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/hammer_logic.py +++ /dev/null @@ -1,161 +0,0 @@ -#!/usr/bin/env python - -############################################################################ -# Copyright (c) 2015 Saint Petersburg State University -# Copyright (c) 2011-2014 Saint Petersburg Academic University -# All Rights Reserved -# See file LICENSE for details. -############################################################################ - - -import os -import sys -import glob -import shutil -import support -import options_storage -import process_cfg -from site import addsitedir -from distutils import dir_util -from os.path import isfile - - -def compress_dataset_files(dataset_data, ext_python_modules_home, max_threads, log): - log.info("\n== Compressing corrected reads (with gzip)") - to_compress = [] - for reads_library in dataset_data: - for key, value in reads_library.items(): - if key.endswith('reads'): - compressed_reads_filenames = [] - for reads_file in value: - compressed_reads_filenames.append(reads_file + ".gz") - if not isfile(reads_file): - if isfile(compressed_reads_filenames[-1]): - continue # already compressed (--continue/--restart-from case) - support.error('something went wrong and file with corrected reads (' + reads_file + ') is missing!', log) - to_compress.append(reads_file) - reads_library[key] = compressed_reads_filenames - if len(to_compress): - pigz_path = support.which('pigz') - if pigz_path: - for reads_file in to_compress: - support.sys_call([pigz_path, '-f', '-7', '-p', str(max_threads), reads_file], log) - else: - addsitedir(ext_python_modules_home) - if sys.version.startswith('2.'): - from joblib2 import Parallel, delayed - elif sys.version.startswith('3.'): - from joblib3 import Parallel, delayed - n_jobs = min(len(to_compress), max_threads) - outputs = Parallel(n_jobs=n_jobs)(delayed(support.sys_call)(['gzip', '-f', '-7', reads_file]) for reads_file in to_compress) - for output in outputs: - if output: - log.info(output) - - -def remove_not_corrected_reads(output_dir): - for not_corrected in glob.glob(os.path.join(output_dir, "*.bad.fastq")): - os.remove(not_corrected) - - -def prepare_config_bh(filename, cfg, log): - subst_dict = dict() - - subst_dict["dataset"] = process_cfg.process_spaces(cfg.dataset_yaml_filename) - subst_dict["input_working_dir"] = process_cfg.process_spaces(cfg.tmp_dir) - subst_dict["output_dir"] = process_cfg.process_spaces(cfg.output_dir) - subst_dict["general_max_iterations"] = cfg.max_iterations - subst_dict["general_max_nthreads"] = cfg.max_threads - subst_dict["count_merge_nthreads"] = cfg.max_threads - subst_dict["bayes_nthreads"] = cfg.max_threads - subst_dict["expand_nthreads"] = cfg.max_threads - subst_dict["correct_nthreads"] = cfg.max_threads - subst_dict["general_hard_memory_limit"] = cfg.max_memory - if "qvoffset" in cfg.__dict__: - subst_dict["input_qvoffset"] = cfg.qvoffset - if "count_filter_singletons" in cfg.__dict__: - subst_dict["count_filter_singletons"] = cfg.count_filter_singletons - if "read_buffer_size" in cfg.__dict__: - subst_dict["count_split_buffer"] = cfg.read_buffer_size - process_cfg.substitute_params(filename, subst_dict, 
log) - - -def prepare_config_ih(filename, cfg, ext_python_modules_home): - addsitedir(ext_python_modules_home) - if sys.version.startswith('2.'): - import pyyaml2 as pyyaml - elif sys.version.startswith('3.'): - import pyyaml3 as pyyaml - - data = pyyaml.load(open(filename, 'r')) - data["dataset"] = cfg.dataset_yaml_filename - data["working_dir"] = cfg.tmp_dir - data["output_dir"] = cfg.output_dir - data["hard_memory_limit"] = cfg.max_memory - data["max_nthreads"] = cfg.max_threads - pyyaml.dump(data, open(filename, 'w'), default_flow_style = False, default_style='"', width=100500) - - -def run_hammer(corrected_dataset_yaml_filename, configs_dir, execution_home, cfg, - dataset_data, ext_python_modules_home, only_compressing_is_needed, log): - addsitedir(ext_python_modules_home) - if sys.version.startswith('2.'): - import pyyaml2 as pyyaml - elif sys.version.startswith('3.'): - import pyyaml3 as pyyaml - - # not all reads need processing - if support.get_lib_ids_by_type(dataset_data, options_storage.LONG_READS_TYPES): - not_used_dataset_data = support.get_libs_by_type(dataset_data, options_storage.LONG_READS_TYPES) - to_correct_dataset_data = support.rm_libs_by_type(dataset_data, options_storage.LONG_READS_TYPES) - to_correct_dataset_yaml_filename = os.path.join(cfg.output_dir, "to_correct.yaml") - pyyaml.dump(to_correct_dataset_data, open(to_correct_dataset_yaml_filename, 'w'), default_flow_style = False, default_style='"', width=100500) - cfg.dataset_yaml_filename = to_correct_dataset_yaml_filename - else: - not_used_dataset_data = None - - if not only_compressing_is_needed: - dst_configs = os.path.join(cfg.output_dir, "configs") - if os.path.exists(dst_configs): - shutil.rmtree(dst_configs) - if cfg.iontorrent: - dir_util.copy_tree(os.path.join(configs_dir, "ionhammer"), dst_configs, preserve_times=False) - cfg_file_name = os.path.join(dst_configs, "ionhammer.cfg") - else: - dir_util.copy_tree(os.path.join(configs_dir, "hammer"), dst_configs, preserve_times=False) - cfg_file_name = os.path.join(dst_configs, "config.info") - - cfg.tmp_dir = support.get_tmp_dir(prefix="hammer_") - if cfg.iontorrent: - prepare_config_ih(cfg_file_name, cfg, ext_python_modules_home) - binary_name = "ionhammer" - else: - prepare_config_bh(cfg_file_name, cfg, log) - binary_name = "hammer" - - command = [os.path.join(execution_home, binary_name), - os.path.abspath(cfg_file_name)] - - log.info("\n== Running read error correction tool: " + ' '.join(command) + "\n") - support.sys_call(command, log) - if not os.path.isfile(corrected_dataset_yaml_filename): - support.error("read error correction finished abnormally: " + corrected_dataset_yaml_filename + " not found!") - else: - log.info("\n===== Skipping %s (already processed). 
\n" % "read error correction tool") - support.continue_from_here(log) - - corrected_dataset_data = pyyaml.load(open(corrected_dataset_yaml_filename, 'r')) - remove_not_corrected_reads(cfg.output_dir) - is_changed = False - if cfg.gzip_output: - is_changed = True - compress_dataset_files(corrected_dataset_data, ext_python_modules_home, cfg.max_threads, log) - if not_used_dataset_data: - is_changed = True - corrected_dataset_data += not_used_dataset_data - if is_changed: - pyyaml.dump(corrected_dataset_data, open(corrected_dataset_yaml_filename, 'w'), default_flow_style = False, default_style='"', width=100500) - log.info("\n== Dataset description file was created: " + corrected_dataset_yaml_filename + "\n") - - if os.path.isdir(cfg.tmp_dir): - shutil.rmtree(cfg.tmp_dir) diff --git a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/options_storage.py b/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/options_storage.py deleted file mode 100644 index 1919e5a..0000000 --- a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/options_storage.py +++ /dev/null @@ -1,514 +0,0 @@ -#!/usr/bin/env python - -############################################################################ -# Copyright (c) 2015 Saint Petersburg State University -# Copyright (c) 2011-2014 Saint Petersburg Academic University -# All Rights Reserved -# See file LICENSE for details. -############################################################################ - -import os -import sys -import support -from os.path import basename - -SUPPORTED_PYTHON_VERSIONS = ['2.4', '2.5', '2.6', '2.7', '3.2', '3.3', '3.4', '3.5'] -# allowed reads extensions for BayesHammer and for thw whole SPAdes pipeline -BH_ALLOWED_READS_EXTENSIONS = ['.fq', '.fastq', '.bam'] -CONTIGS_ALLOWED_READS_EXTENSIONS = ['.fa', '.fasta'] -ALLOWED_READS_EXTENSIONS = BH_ALLOWED_READS_EXTENSIONS + CONTIGS_ALLOWED_READS_EXTENSIONS -# reads could be gzipped -BH_ALLOWED_READS_EXTENSIONS += [x + '.gz' for x in BH_ALLOWED_READS_EXTENSIONS] -CONTIGS_ALLOWED_READS_EXTENSIONS += [x + '.gz' for x in CONTIGS_ALLOWED_READS_EXTENSIONS] -ALLOWED_READS_EXTENSIONS += [x + '.gz' for x in ALLOWED_READS_EXTENSIONS] - -# we support up to MAX_LIBS_NUMBER libs for each type of short-reads libs -MAX_LIBS_NUMBER = 9 -OLD_STYLE_READS_OPTIONS = ["--12", "-1", "-2", "-s"] -SHORT_READS_TYPES = {"pe": "paired-end", "s": "single", "mp": "mate-pairs", "hqmp": "hq-mate-pairs", "nxmate": "nxmate"} -# other libs types: -LONG_READS_TYPES = ["pacbio", "sanger", "nanopore", "tslr", "trusted-contigs", "untrusted-contigs"] - -# final contigs and scaffolds names -contigs_name = "contigs.fasta" -scaffolds_name = "scaffolds.fasta" -assembly_graph_name = "assembly_graph.fastg" -assembly_graph_name_gfa = "assembly_graph.gfa" -contigs_paths = "contigs.paths" -scaffolds_paths = "scaffolds.paths" -transcripts_name = "transcripts.fasta" -transcripts_paths = "transcripts.paths" - -#other constants -MIN_K = 1 -MAX_K = 127 -THRESHOLD_FOR_BREAKING_SCAFFOLDS = 3 -THRESHOLD_FOR_BREAKING_ADDITIONAL_CONTIGS = 10 - -#default values constants -THREADS = 16 -MEMORY = 250 -K_MERS_RNA = [55] -K_MERS_SHORT = [21,33,55] -K_MERS_150 = [21,33,55,77] -K_MERS_250 = [21,33,55,77,99,127] - -ITERATIONS = 1 -TMP_DIR = "tmp" - -### START OF OPTIONS -# basic options -output_dir = None -single_cell = False -iontorrent = False -meta = False -rna = False -large_genome = False -test_mode = False -plasmid = False - -# pipeline options -only_error_correction = False -only_assembler = False -disable_gzip_output = None -disable_rr = None 
-careful = None -diploid_mode = False - -# advanced options -continue_mode = False -developer_mode = None -dataset_yaml_filename = None -threads = None -memory = None -tmp_dir = None -k_mers = None -qvoffset = None # auto-detect by default -cov_cutoff = 'off' # default is 'off' - -# hidden options -mismatch_corrector = None -reference = None -series_analysis = None -configs_dir = None -iterations = None -bh_heap_check = None -spades_heap_check = None -read_buffer_size = None -### END OF OPTIONS - -# for restarting SPAdes -restart_from = None -restart_careful = None -restart_mismatch_corrector = None -restart_disable_gzip_output = None -restart_disable_rr = None -restart_threads = None -restart_memory = None -restart_tmp_dir = None -restart_k_mers = None -original_k_mers = None -restart_qvoffset = None -restart_cov_cutoff = None -restart_developer_mode = None -restart_reference = None -restart_configs_dir = None -restart_read_buffer_size = None - -# for running to specific check-point -stop_after = None -run_completed = False - -#truseq options -truseq_mode = False -correct_scaffolds = False -run_truseq_postprocessing = False - -dict_of_prefixes = dict() -dict_of_rel2abs = dict() - -# list of spades.py options -long_options = "12= threads= memory= tmp-dir= iterations= phred-offset= sc iontorrent meta large-genome rna plasmid "\ - "only-error-correction only-assembler "\ - "disable-gzip-output disable-gzip-output:false disable-rr disable-rr:false " \ - "help version test debug debug:false reference= series-analysis= config-file= dataset= "\ - "bh-heap-check= spades-heap-check= read-buffer-size= help-hidden "\ - "mismatch-correction mismatch-correction:false careful careful:false "\ - "continue restart-from= diploid truseq cov-cutoff= configs-dir= stop-after=".split() -short_options = "o:1:2:s:k:t:m:i:hv" - -# adding multiple paired-end, mate-pair and other (long reads) libraries support -reads_options = [] -for i in range(MAX_LIBS_NUMBER): - for type in SHORT_READS_TYPES.keys(): - if type == 's': # single - reads_options += ["s%d=" % (i+1)] - elif type == 'nxmate': # special case: only left and right reads - reads_options += ("%s%d-1= %s%d-2=" % tuple([type, i + 1] * 2)).split() - else: # paired-end, mate-pairs, hq-mate-pairs - reads_options += ("%s%d-1= %s%d-2= %s%d-12= %s%d-s= %s%d-rf %s%d-fr %s%d-ff" % tuple([type, i + 1] * 7)).split() -reads_options += list(map(lambda x: x + '=', LONG_READS_TYPES)) -long_options += reads_options -# for checking whether option corresponds to reads or not -reads_options = list(map(lambda x: "--" + x.split('=')[0], reads_options)) -reads_options += OLD_STYLE_READS_OPTIONS - - -def get_mode(): - mode = None - if basename(sys.argv[0]) == "rnaspades.py": - mode = 'rna' - elif basename(sys.argv[0]) == "plasmidspades.py": - mode = 'plasmid' - elif basename(sys.argv[0]) == "metaspades.py": - mode = 'meta' - return mode - - -def version(spades_version, mode=None): - sys.stderr.write("SPAdes v" + str(spades_version)) - if mode is None: - mode = get_mode() - if mode is not None: - sys.stderr.write(" [" + mode + "SPAdes mode]") - sys.stderr.write("\n") - sys.stderr.flush() - - -def usage(spades_version, show_hidden=False, mode=None): - sys.stderr.write("SPAdes genome assembler v" + str(spades_version)) - if mode is None: - mode = get_mode() - if mode is not None: - sys.stderr.write(" [" + mode + "SPAdes mode]") - sys.stderr.write("\n\n") - sys.stderr.write("Usage: " + str(sys.argv[0]) + " [options] -o " + "\n") - sys.stderr.write("" + "\n") - sys.stderr.write("Basic 
options:" + "\n") - sys.stderr.write("-o\t\tdirectory to store all the resulting files (required)" + "\n") - if mode is None: # nothing special, just regular spades.py - sys.stderr.write("--sc\t\t\tthis flag is required for MDA (single-cell) data" + "\n") - sys.stderr.write("--meta\t\t\tthis flag is required for metagenomic sample data" + "\n") - sys.stderr.write("--rna\t\t\tthis flag is required for RNA-Seq data \n") - sys.stderr.write("--plasmid\t\truns plasmidSPAdes pipeline for plasmid detection \n") - - sys.stderr.write("--iontorrent\t\tthis flag is required for IonTorrent data" + "\n") - sys.stderr.write("--test\t\t\truns SPAdes on toy dataset" + "\n") - sys.stderr.write("-h/--help\t\tprints this usage message" + "\n") - sys.stderr.write("-v/--version\t\tprints version" + "\n") - - sys.stderr.write("" + "\n") - if mode != "dip": - sys.stderr.write("Input data:" + "\n") - else: - sys.stderr.write("Input reads:" + "\n") - sys.stderr.write("--12\t\tfile with interlaced forward and reverse"\ - " paired-end reads" + "\n") - sys.stderr.write("-1\t\tfile with forward paired-end reads" + "\n") - sys.stderr.write("-2\t\tfile with reverse paired-end reads" + "\n") - sys.stderr.write("-s\t\tfile with unpaired reads" + "\n") - sys.stderr.write("--pe<#>-12\t\tfile with interlaced"\ - " reads for paired-end library number <#> (<#> = 1,2,..,9)" + "\n") - sys.stderr.write("--pe<#>-1\t\tfile with forward reads"\ - " for paired-end library number <#> (<#> = 1,2,..,9)" + "\n") - sys.stderr.write("--pe<#>-2\t\tfile with reverse reads"\ - " for paired-end library number <#> (<#> = 1,2,..,9)" + "\n") - sys.stderr.write("--pe<#>-s\t\tfile with unpaired reads"\ - " for paired-end library number <#> (<#> = 1,2,..,9)" + "\n") - sys.stderr.write("--pe<#>-\torientation of reads"\ - " for paired-end library number <#> (<#> = 1,2,..,9; = fr, rf, ff)" + "\n") - sys.stderr.write("--s<#>\t\t\tfile with unpaired reads"\ - " for single reads library number <#> (<#> = 1,2,..,9)" + "\n") - if mode not in ["rna", "meta"]: - sys.stderr.write("--mp<#>-12\t\tfile with interlaced"\ - " reads for mate-pair library number <#> (<#> = 1,2,..,9)" + "\n") - sys.stderr.write("--mp<#>-1\t\tfile with forward reads"\ - " for mate-pair library number <#> (<#> = 1,2,..,9)" + "\n") - sys.stderr.write("--mp<#>-2\t\tfile with reverse reads"\ - " for mate-pair library number <#> (<#> = 1,2,..,9)" + "\n") - sys.stderr.write("--mp<#>-s\t\tfile with unpaired reads"\ - " for mate-pair library number <#> (<#> = 1,2,..,9)" + "\n") - sys.stderr.write("--mp<#>-\torientation of reads"\ - " for mate-pair library number <#> (<#> = 1,2,..,9; = fr, rf, ff)" + "\n") - sys.stderr.write("--hqmp<#>-12\t\tfile with interlaced"\ - " reads for high-quality mate-pair library number <#> (<#> = 1,2,..,9)" + "\n") - sys.stderr.write("--hqmp<#>-1\t\tfile with forward reads"\ - " for high-quality mate-pair library number <#> (<#> = 1,2,..,9)" + "\n") - sys.stderr.write("--hqmp<#>-2\t\tfile with reverse reads"\ - " for high-quality mate-pair library number <#> (<#> = 1,2,..,9)" + "\n") - sys.stderr.write("--hqmp<#>-s\t\tfile with unpaired reads"\ - " for high-quality mate-pair library number <#> (<#> = 1,2,..,9)" + "\n") - sys.stderr.write("--hqmp<#>-\torientation of reads"\ - " for high-quality mate-pair library number <#> (<#> = 1,2,..,9; = fr, rf, ff)" + "\n") - sys.stderr.write("--nxmate<#>-1\t\tfile with forward reads"\ - " for Lucigen NxMate library number <#> (<#> = 1,2,..,9)" + "\n") - sys.stderr.write("--nxmate<#>-2\t\tfile with reverse reads"\ - " for 
Lucigen NxMate library number <#> (<#> = 1,2,..,9)" + "\n") - sys.stderr.write("--sanger\t\tfile with Sanger reads\n") - sys.stderr.write("--pacbio\t\tfile with PacBio reads\n") - sys.stderr.write("--nanopore\t\tfile with Nanopore reads\n") - sys.stderr.write("--tslr\t\tfile with TSLR-contigs\n") - sys.stderr.write("--trusted-contigs\t\tfile with trusted contigs\n") - sys.stderr.write("--untrusted-contigs\t\tfile with untrusted contigs\n") - if mode == "dip": - sys.stderr.write("Input haplocontigs:" + "\n") - sys.stderr.write("--hap\t\tfile with haplocontigs" + "\n") - - sys.stderr.write("" + "\n") - sys.stderr.write("Pipeline options:" + "\n") - if mode != "dip": - sys.stderr.write("--only-error-correction\truns only read error correction"\ - " (without assembling)" + "\n") - sys.stderr.write("--only-assembler\truns only assembling (without read error"\ - " correction)" + "\n") - if mode != "dip": - if mode not in ["rna", "meta"]: - sys.stderr.write("--careful\t\ttries to reduce number of mismatches and short indels" + "\n") - sys.stderr.write("--continue\t\tcontinue run from the last available check-point" + "\n") - sys.stderr.write("--restart-from\t\trestart run with updated options and from the specified check-point ('ec', 'as', 'k', 'mc')" + "\n") - sys.stderr.write("--disable-gzip-output\tforces error correction not to"\ - " compress the corrected reads" + "\n") - sys.stderr.write("--disable-rr\t\tdisables repeat resolution stage"\ - " of assembling" + "\n") - - if mode == "dip": - sys.stderr.write("" + "\n") - sys.stderr.write("DipSPAdes options:" + "\n") - sys.stderr.write("--expect-gaps\t\tindicates that significant number of gaps in coverage is expected" + "\n") - sys.stderr.write("--expect-rearrangements\tindicates that significant number of rearrangements between haplomes of diploid genome is expected" + "\n") - sys.stderr.write("--hap-assembly\t\tenables haplotype assembly phase" + "\n") - - sys.stderr.write("" + "\n") - sys.stderr.write("Advanced options:" + "\n") - sys.stderr.write("--dataset\t\tfile with dataset description in YAML format" + "\n") - sys.stderr.write("-t/--threads\t\t\tnumber of threads" + "\n") - sys.stderr.write("\t\t\t\t[default: %s]\n" % THREADS) - sys.stderr.write("-m/--memory\t\t\tRAM limit for SPAdes in Gb"\ - " (terminates if exceeded)" + "\n") - sys.stderr.write("\t\t\t\t[default: %s]\n" % MEMORY) - sys.stderr.write("--tmp-dir\t\tdirectory for temporary files" + "\n") - sys.stderr.write("\t\t\t\t[default: /tmp]" + "\n") - if mode != 'rna': - sys.stderr.write("-k\t\t\tcomma-separated list of k-mer sizes" \ - " (must be odd and" + "\n") - sys.stderr.write("\t\t\t\tless than " + str(MAX_K + 1) + ") [default: 'auto']" + "\n") - else: - sys.stderr.write("-k\t\t\t\tk-mer size (must be odd and less than " + str(MAX_K + 1) + ") " \ - "[default: " + str(K_MERS_RNA[0]) + "]\n") - - if mode not in ["rna", "meta"]: - sys.stderr.write("--cov-cutoff\t\t\tcoverage cutoff value (a positive float number, " - "or 'auto', or 'off') [default: 'off']" + "\n") - sys.stderr.write("--phred-offset\t<33 or 64>\tPHRED quality offset in the"\ - " input reads (33 or 64)" + "\n") - sys.stderr.write("\t\t\t\t[default: auto-detect]" + "\n") - - if show_hidden: - sys.stderr.write("" + "\n") - sys.stderr.write("HIDDEN options:" + "\n") - sys.stderr.write("--debug\t\t\t\truns SPAdes in debug mode (keeps intermediate output)" + "\n") - sys.stderr.write("--stop-after\t\truns SPAdes until the specified check-point ('ec', 'as', 'k', 'mc') inclusive" + "\n") - 
sys.stderr.write("--truseq\t\t\truns SPAdes in TruSeq mode\n") - sys.stderr.write("--mismatch-correction\t\truns post processing correction"\ - " of mismatches and short indels" + "\n") - sys.stderr.write("--reference\t\tfile with reference for deep analysis"\ - " (only in debug mode)" + "\n") - sys.stderr.write("--series-analysis\t\tconfig for metagenomics-series-augmented reassembly" + "\n") - sys.stderr.write("--configs-dir\t\tdirectory with configs" + "\n") - sys.stderr.write("-i/--iterations\t\t\tnumber of iterations for read error"\ - " correction [default: %s]\n" % ITERATIONS) - sys.stderr.write("--read-buffer-size\t\t\tsets size of read buffer for graph construction") - sys.stderr.write("--bh-heap-check\t\t\tsets HEAPCHECK environment variable"\ - " for BayesHammer" + "\n") - sys.stderr.write("--spades-heap-check\t\tsets HEAPCHECK environment variable"\ - " for SPAdes" + "\n") - sys.stderr.write("--large-genome\tEnables optimizations for large genomes \n") - sys.stderr.write("--help-hidden\tprints this usage message with all hidden options" + "\n") - - if show_hidden and mode == "dip": - sys.stderr.write("" + "\n") - sys.stderr.write("HIDDEN dipSPAdes options:" + "\n") - sys.stderr.write("--dsK\t\t\t\tk value used in dipSPAdes [default: '55']" + '\n') - sys.stderr.write("--dsdebug\t\t\tmakes saves and draws pictures" + '\n') - sys.stderr.write("--saves\t\tdirectory with saves which will be used for graph loading" + '\n') - sys.stderr.write("--start-from\t\tstart point of dipSPAdes:" + '\n') - sys.stderr.write(" pbr: polymorphic bulge remover\n kmg: gluer of equal k-mers\n cc: consensus constructor\n ha: haplotype assembly" + '\n') - - sys.stderr.flush() - - -def auto_K_allowed(): - return not k_mers and not single_cell and not iontorrent and not rna and not meta - # kmers were set by default, not SC, not IonTorrent data and not rna and temporary not meta - - -def set_default_values(): - global threads - global memory - global iterations - global disable_gzip_output - global disable_rr - global careful - global mismatch_corrector - global developer_mode - global qvoffset - global cov_cutoff - global tmp_dir - - if threads is None: - threads = THREADS - if memory is None: - if support.get_available_memory(): - memory = int(min(MEMORY, support.get_available_memory())) - else: - memory = MEMORY - if iterations is None: - iterations = ITERATIONS - if disable_gzip_output is None: - disable_gzip_output = False - if disable_rr is None: - disable_rr = False - if careful is None: - careful = False - if mismatch_corrector is None: - mismatch_corrector = False - if developer_mode is None: - developer_mode = False - if qvoffset == 'auto': - qvoffset = None - if cov_cutoff is None: - cov_cutoff = 'off' - if tmp_dir is None: - tmp_dir = os.path.join(output_dir, TMP_DIR) - - -def set_test_options(): - global output_dir - global single_cell - global test_mode - - output_dir = os.path.abspath('spades_test') - single_cell = False - meta = False - test_mode = True - - -def save_restart_options(log): - if dataset_yaml_filename: - support.error("you cannot specify --dataset with --restart-from option!", log) - if single_cell: - support.error("you cannot specify --sc with --restart-from option!", log) - if meta: - support.error("you cannot specify --meta with --restart-from option!", log) - if iontorrent: - support.error("you cannot specify --iontorrent with --restart-from option!", log) - if only_assembler: - support.error("you cannot specify --only-assembler with --restart-from option!", log) - if 
only_error_correction: - support.error("you cannot specify --only-error-correction with --restart-from option!", log) - - global restart_k_mers - global restart_careful - global restart_mismatch_corrector - global restart_disable_gzip_output - global restart_disable_rr - global restart_threads - global restart_memory - global restart_tmp_dir - global restart_qvoffset - global restart_cov_cutoff - global restart_developer_mode - global restart_reference - global restart_configs_dir - global restart_read_buffer_size - - restart_k_mers = k_mers - restart_careful = careful - restart_mismatch_corrector = mismatch_corrector - restart_disable_gzip_output = disable_gzip_output - restart_disable_rr = disable_rr - restart_threads = threads - restart_memory = memory - restart_tmp_dir = tmp_dir - restart_qvoffset = qvoffset - restart_cov_cutoff = cov_cutoff - restart_developer_mode = developer_mode - restart_reference = reference - restart_configs_dir = configs_dir - restart_read_buffer_size = read_buffer_size - - -def load_restart_options(): - global k_mers - global careful - global mismatch_corrector - global disable_gzip_output - global disable_rr - global threads - global memory - global tmp_dir - global qvoffset - global cov_cutoff - global developer_mode - global reference - global configs_dir - global read_buffer_size - global original_k_mers - - if restart_k_mers: - original_k_mers = k_mers - if restart_k_mers == 'auto': - k_mers = None # set by default - else: - k_mers = restart_k_mers - if restart_careful is not None: - careful = restart_careful - if restart_mismatch_corrector is not None: - mismatch_corrector = restart_mismatch_corrector - if disable_gzip_output is not None: - disable_gzip_output = restart_disable_gzip_output - if restart_disable_rr is not None: - disable_rr = restart_disable_rr - if restart_threads is not None: - threads = restart_threads - if restart_memory is not None: - memory = restart_memory - if restart_tmp_dir is not None: - tmp_dir = restart_tmp_dir - if restart_qvoffset is not None: - qvoffset = restart_qvoffset - if restart_cov_cutoff is not None: - cov_cutoff = restart_cov_cutoff - if restart_developer_mode is not None: - developer_mode = restart_developer_mode - if restart_reference is not None: - reference = restart_reference - if restart_configs_dir is not None: - configs_dir = restart_configs_dir - if restart_read_buffer_size is not None: - read_buffer_size = restart_read_buffer_size - - -def enable_truseq_mode(): - global truseq_mode - global correct_scaffolds - global run_truseq_postprocessing - global K_MERS_SHORT - global K_MERS_150 - global K_MERS_250 - global only_assembler - global single_cell - K_MERS_SHORT = [21,33,45,55] - K_MERS_150 = [21,33,45,55,77] - K_MERS_250 = [21,33,45,55,77,99,127] - truseq_mode = True - correct_scaffolds = True - run_truseq_postprocessing = True - only_assembler = True - - -def will_rerun(options): - for opt, arg in options: - if opt == '--continue' or opt.startswith('--restart-from'): # checks both --restart-from k33 and --restart-from=k33 - return True - return False diff --git a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/spades_logic.py b/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/spades_logic.py deleted file mode 100644 index 8b47c0d..0000000 --- a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/spades_logic.py +++ /dev/null @@ -1,393 +0,0 @@ -#!/usr/bin/env python - -############################################################################ -# Copyright (c) 2015 Saint Petersburg State 
University -# Copyright (c) 2011-2014 Saint Petersburg Academic University -# All Rights Reserved -# See file LICENSE for details. -############################################################################ - -import os -import sys -import shutil -import support -import process_cfg -from process_cfg import bool_to_str -from site import addsitedir -from distutils import dir_util -import options_storage - -BASE_STAGE = "construction" -READS_TYPES_USED_IN_CONSTRUCTION = ["paired-end", "single", "hq-mate-pairs"] -READS_TYPES_USED_IN_RNA_SEQ = ["paired-end", "single", "trusted-contigs", "untrusted-contigs"] - - -def prepare_config_spades(filename, cfg, log, additional_contigs_fname, K, stage, saves_dir, last_one, execution_home): - subst_dict = dict() - - subst_dict["K"] = str(K) - subst_dict["dataset"] = process_cfg.process_spaces(cfg.dataset) - subst_dict["output_base"] = process_cfg.process_spaces(cfg.output_dir) - subst_dict["tmp_dir"] = process_cfg.process_spaces(cfg.tmp_dir) - if additional_contigs_fname: - subst_dict["additional_contigs"] = process_cfg.process_spaces(additional_contigs_fname) - subst_dict["use_additional_contigs"] = bool_to_str(True) - else: - subst_dict["use_additional_contigs"] = bool_to_str(False) - subst_dict["main_iteration"] = bool_to_str(last_one) - subst_dict["entry_point"] = stage - subst_dict["load_from"] = saves_dir - subst_dict["developer_mode"] = bool_to_str(cfg.developer_mode) - subst_dict["gap_closer_enable"] = bool_to_str(last_one or K >= 55) - subst_dict["rr_enable"] = bool_to_str(last_one and cfg.rr_enable) -# subst_dict["topology_simplif_enabled"] = bool_to_str(last_one) - subst_dict["max_threads"] = cfg.max_threads - subst_dict["max_memory"] = cfg.max_memory - if (not last_one): - subst_dict["correct_mismatches"] = bool_to_str(False) - if "resolving_mode" in cfg.__dict__: - subst_dict["resolving_mode"] = cfg.resolving_mode - if "pacbio_mode" in cfg.__dict__: - subst_dict["pacbio_test_on"] = bool_to_str(cfg.pacbio_mode) - subst_dict["pacbio_reads"] = process_cfg.process_spaces(cfg.pacbio_reads) - if cfg.cov_cutoff == "off": - subst_dict["use_coverage_threshold"] = bool_to_str(False) - else: - subst_dict["use_coverage_threshold"] = bool_to_str(True) - if cfg.cov_cutoff == "auto": - subst_dict["coverage_threshold"] = 0.0 - else: - subst_dict["coverage_threshold"] = cfg.cov_cutoff - - #TODO: make something about spades.py and config param substitution - if "bwa_paired" in cfg.__dict__: - subst_dict["bwa_enable"] = bool_to_str(True) - subst_dict["path_to_bwa"] = os.path.join(execution_home, "bwa-spades") - if "series_analysis" in cfg.__dict__: - subst_dict["series_analysis"] = cfg.series_analysis - process_cfg.substitute_params(filename, subst_dict, log) - - -def get_read_length(output_dir, K, ext_python_modules_home, log): - est_params_filename = os.path.join(output_dir, "K%d" % K, "final.lib_data") - max_read_length = 0 - if os.path.isfile(est_params_filename): - addsitedir(ext_python_modules_home) - if sys.version.startswith('2.'): - import pyyaml2 as pyyaml - elif sys.version.startswith('3.'): - import pyyaml3 as pyyaml - est_params_data = pyyaml.load(open(est_params_filename, 'r')) - for reads_library in est_params_data: - if reads_library['type'] in READS_TYPES_USED_IN_CONSTRUCTION: - if int(reads_library["read length"]) > max_read_length: - max_read_length = int(reads_library["read length"]) - if max_read_length == 0: - support.error("Failed to estimate maximum read length! 
File with estimated params: " + est_params_filename, log) - return max_read_length - - -def update_k_mers_in_special_cases(cur_k_mers, RL, log, silent=False): - if options_storage.auto_K_allowed(): - if RL >= 250: - if not silent: - log.info("Default k-mer sizes were set to %s because estimated " - "read length (%d) is equal to or greater than 250" % (str(options_storage.K_MERS_250), RL)) - return options_storage.K_MERS_250 - if RL >= 150: - if not silent: - log.info("Default k-mer sizes were set to %s because estimated " - "read length (%d) is equal to or greater than 150" % (str(options_storage.K_MERS_150), RL)) - return options_storage.K_MERS_150 - if RL <= max(cur_k_mers): - new_k_mers = [k for k in cur_k_mers if k < RL] - if not silent: - log.info("K-mer sizes were set to %s because estimated " - "read length (%d) is less than %d" % (str(new_k_mers), RL, max(cur_k_mers))) - return new_k_mers - return cur_k_mers - - -def reveal_original_k_mers(RL): - if options_storage.original_k_mers is None or options_storage.original_k_mers == 'auto': - cur_k_mers = options_storage.k_mers - options_storage.k_mers = options_storage.original_k_mers - original_k_mers = update_k_mers_in_special_cases(options_storage.K_MERS_SHORT, RL, None, silent=True) - options_storage.k_mers = cur_k_mers - else: - original_k_mers = options_storage.original_k_mers - original_k_mers = [k for k in original_k_mers if k < RL] - return original_k_mers - -def add_configs(command, configs_dir): - #Order matters here! - mode_config_mapping = [("single_cell", "mda_mode"), - ("meta", "meta_mode"), - ("truseq_mode", "moleculo_mode"), - ("rna", "rna_mode"), - ("large_genome", "large_genome_mode"), - ("plasmid", "plasmid_mode"), - ("careful", "careful_mode"), - ("diploid_mode", "diploid_mode")] - for (mode, config) in mode_config_mapping: - if options_storage.__dict__[mode]: - if mode == "rna" or mode == "meta": - command.append(os.path.join(configs_dir, "mda_mode.info")) - command.append(os.path.join(configs_dir, config + ".info")) - - -def run_iteration(configs_dir, execution_home, cfg, log, K, prev_K, last_one): - data_dir = os.path.join(cfg.output_dir, "K%d" % K) - stage = BASE_STAGE - saves_dir = os.path.join(data_dir, 'saves') - dst_configs = os.path.join(data_dir, "configs") - - if options_storage.continue_mode: - if os.path.isfile(os.path.join(data_dir, "final_contigs.fasta")) and not (options_storage.restart_from and - (options_storage.restart_from == ("k%d" % K) or options_storage.restart_from.startswith("k%d:" % K))): - log.info("\n== Skipping assembler: " + ("K%d" % K) + " (already processed)") - return - if options_storage.restart_from and options_storage.restart_from.find(":") != -1 \ - and options_storage.restart_from.startswith("k%d:" % K): - stage = options_storage.restart_from[options_storage.restart_from.find(":") + 1:] - support.continue_from_here(log) - - if stage != BASE_STAGE: - if not os.path.isdir(saves_dir): - support.error("Cannot restart from stage %s: saves were not found (%s)!" 
% (stage, saves_dir)) - else: - if os.path.exists(data_dir): - shutil.rmtree(data_dir) - os.makedirs(data_dir) - - dir_util._path_created = {} # see http://stackoverflow.com/questions/9160227/dir-util-copy-tree-fails-after-shutil-rmtree - dir_util.copy_tree(os.path.join(configs_dir, "debruijn"), dst_configs, preserve_times=False) - - log.info("\n== Running assembler: " + ("K%d" % K) + "\n") - if prev_K: - additional_contigs_fname = os.path.join(cfg.output_dir, "K%d" % prev_K, "simplified_contigs.fasta") - if not os.path.isfile(additional_contigs_fname): - support.warning("additional contigs for K=%d were not found (%s)!" % (K, additional_contigs_fname), log) - additional_contigs_fname = None - else: - additional_contigs_fname = None - if "read_buffer_size" in cfg.__dict__: - #FIXME why here??? - process_cfg.substitute_params(os.path.join(dst_configs, "construction.info"), {"read_buffer_size": cfg.read_buffer_size}, log) - if "scaffolding_mode" in cfg.__dict__: - #FIXME why here??? - process_cfg.substitute_params(os.path.join(dst_configs, "pe_params.info"), {"scaffolding_mode": cfg.scaffolding_mode}, log) - - cfg_fn = os.path.join(dst_configs, "config.info") - prepare_config_spades(cfg_fn, cfg, log, additional_contigs_fname, K, stage, saves_dir, last_one, execution_home) - - command = [os.path.join(execution_home, "spades"), cfg_fn] - - add_configs(command, dst_configs) - - #print("Calling: " + " ".join(command)) - support.sys_call(command, log) - - -def prepare_config_scaffold_correction(filename, cfg, log, saves_dir, K): - subst_dict = dict() - - subst_dict["K"] = str(K) - subst_dict["dataset"] = process_cfg.process_spaces(cfg.dataset) - subst_dict["output_base"] = process_cfg.process_spaces(os.path.join(cfg.output_dir, "SCC")) - subst_dict["tmp_dir"] = process_cfg.process_spaces(cfg.tmp_dir) - subst_dict["use_additional_contigs"] = bool_to_str(False) - subst_dict["main_iteration"] = bool_to_str(False) - subst_dict["entry_point"] = BASE_STAGE - subst_dict["load_from"] = saves_dir - subst_dict["developer_mode"] = bool_to_str(cfg.developer_mode) - subst_dict["max_threads"] = cfg.max_threads - subst_dict["max_memory"] = cfg.max_memory - - #todo - process_cfg.substitute_params(filename, subst_dict, log) - - -def run_scaffold_correction(configs_dir, execution_home, cfg, log, latest, K): - data_dir = os.path.join(cfg.output_dir, "SCC", "K%d" % K) - saves_dir = os.path.join(data_dir, 'saves') - dst_configs = os.path.join(data_dir, "configs") - cfg_file_name = os.path.join(dst_configs, "config.info") - - if os.path.exists(data_dir): - shutil.rmtree(data_dir) - os.makedirs(data_dir) - - dir_util.copy_tree(os.path.join(configs_dir, "debruijn"), dst_configs, preserve_times=False) - - log.info("\n== Running scaffold correction \n") - scaffolds_file = os.path.join(latest, "scaffolds.fasta") - if not os.path.isfile(scaffolds_file): - support.error("Scaffodls were not found in " + scaffolds_file, log) - if "read_buffer_size" in cfg.__dict__: - construction_cfg_file_name = os.path.join(dst_configs, "construction.info") - process_cfg.substitute_params(construction_cfg_file_name, {"read_buffer_size": cfg.read_buffer_size}, log) - process_cfg.substitute_params(os.path.join(dst_configs, "moleculo_mode.info"), {"scaffolds_file": scaffolds_file}, log) - prepare_config_scaffold_correction(cfg_file_name, cfg, log, saves_dir, K) - command = [os.path.join(execution_home, "scaffold_correction"), cfg_file_name] - add_configs(command, dst_configs) - log.info(str(command)) - support.sys_call(command, log) - - -def 
run_spades(configs_dir, execution_home, cfg, dataset_data, ext_python_modules_home, log): - if not isinstance(cfg.iterative_K, list): - cfg.iterative_K = [cfg.iterative_K] - cfg.iterative_K = sorted(cfg.iterative_K) - used_K = [] - - # checking and removing conflicting K-mer directories - if options_storage.restart_from and (options_storage.restart_k_mers != options_storage.original_k_mers): - processed_K = [] - for k in range(options_storage.MIN_K, options_storage.MAX_K, 2): - cur_K_dir = os.path.join(cfg.output_dir, "K%d" % k) - if os.path.isdir(cur_K_dir) and os.path.isfile(os.path.join(cur_K_dir, "final_contigs.fasta")): - processed_K.append(k) - if processed_K: - RL = get_read_length(cfg.output_dir, processed_K[0], ext_python_modules_home, log) - needed_K = update_k_mers_in_special_cases(cfg.iterative_K, RL, log, silent=True) - needed_K = [k for k in needed_K if k < RL] - original_K = reveal_original_k_mers(RL) - - k_to_delete = [] - for id, k in enumerate(needed_K): - if len(processed_K) == id: - if processed_K[-1] == original_K[-1]: # the last K in the original run was processed in "last_one" mode - k_to_delete = [original_K[-1]] - break - if processed_K[id] != k: - k_to_delete = processed_K[id:] - break - if not k_to_delete and (len(processed_K) > len(needed_K)): - k_to_delete = processed_K[len(needed_K) - 1:] - if k_to_delete: - log.info("Restart mode: removing previously processed directories for K=%s " - "to avoid conflicts with K specified with --restart-from" % (str(k_to_delete))) - for k in k_to_delete: - shutil.rmtree(os.path.join(cfg.output_dir, "K%d" % k)) - - bin_reads_dir = os.path.join(cfg.output_dir, ".bin_reads") - if os.path.isdir(bin_reads_dir) and not options_storage.continue_mode: - shutil.rmtree(bin_reads_dir) - cfg.tmp_dir = support.get_tmp_dir(prefix="spades_") - - finished_on_stop_after = False - K = cfg.iterative_K[0] - if len(cfg.iterative_K) == 1: - run_iteration(configs_dir, execution_home, cfg, log, K, None, True) - used_K.append(K) - else: - run_iteration(configs_dir, execution_home, cfg, log, K, None, False) - used_K.append(K) - if options_storage.stop_after == "k%d" % K: - finished_on_stop_after = True - else: - prev_K = K - RL = get_read_length(cfg.output_dir, K, ext_python_modules_home, log) - cfg.iterative_K = update_k_mers_in_special_cases(cfg.iterative_K, RL, log) - if len(cfg.iterative_K) < 2 or cfg.iterative_K[1] + 1 > RL: - if cfg.rr_enable: - if len(cfg.iterative_K) < 2: - log.info("== Rerunning for the first value of K (%d) with Repeat Resolving" % - cfg.iterative_K[0]) - else: - support.warning("Second value of iterative K (%d) exceeded estimated read length (%d). " - "Rerunning for the first value of K (%d) with Repeat Resolving" % - (cfg.iterative_K[1], RL, cfg.iterative_K[0]), log) - run_iteration(configs_dir, execution_home, cfg, log, cfg.iterative_K[0], None, True) - used_K.append(cfg.iterative_K[0]) - K = cfg.iterative_K[0] - else: - rest_of_iterative_K = cfg.iterative_K - rest_of_iterative_K.pop(0) - count = 0 - for K in rest_of_iterative_K: - count += 1 - last_one = count == len(cfg.iterative_K) or (rest_of_iterative_K[count] + 1 > RL) - run_iteration(configs_dir, execution_home, cfg, log, K, prev_K, last_one) - used_K.append(K) - prev_K = K - if last_one: - break - if options_storage.stop_after == "k%d" % K: - finished_on_stop_after = True - break - if count < len(cfg.iterative_K) and not finished_on_stop_after: - support.warning("Iterations stopped. 
Value of K (%d) exceeded estimated read length (%d)" % - (cfg.iterative_K[count], RL), log) - - if options_storage.stop_after and options_storage.stop_after.startswith('k'): - support.finish_here(log) - latest = os.path.join(cfg.output_dir, "K%d" % K) - - if cfg.correct_scaffolds and not options_storage.run_completed: - if options_storage.continue_mode and os.path.isfile(os.path.join(cfg.output_dir, "SCC", "corrected_scaffolds.fasta")) and not options_storage.restart_from == "scc": - log.info("\n===== Skipping %s (already processed). \n" % "scaffold correction") - else: - if options_storage.continue_mode: - support.continue_from_here(log) - run_scaffold_correction(configs_dir, execution_home, cfg, log, latest, 21) - latest = os.path.join(os.path.join(cfg.output_dir, "SCC"), "K21") - if options_storage.stop_after == 'scc': - support.finish_here(log) - - if cfg.correct_scaffolds: - correct_scaffolds_fpath = os.path.join(latest, "corrected_scaffolds.fasta") - if os.path.isfile(correct_scaffolds_fpath): - shutil.copyfile(correct_scaffolds_fpath, cfg.result_scaffolds) - elif not finished_on_stop_after: # interupted by --stop-after, so final K is not processed! - if os.path.isfile(os.path.join(latest, "before_rr.fasta")): - result_before_rr_contigs = os.path.join(os.path.dirname(cfg.result_contigs), "before_rr.fasta") - if not os.path.isfile(result_before_rr_contigs) or not options_storage.continue_mode: - shutil.copyfile(os.path.join(latest, "before_rr.fasta"), result_before_rr_contigs) - if options_storage.rna: - if os.path.isfile(os.path.join(latest, "transcripts.fasta")): - if not os.path.isfile(cfg.result_transcripts) or not options_storage.continue_mode: - shutil.copyfile(os.path.join(latest, "transcripts.fasta"), cfg.result_transcripts) - if os.path.isfile(os.path.join(latest, "transcripts.paths")): - if not os.path.isfile(cfg.result_transcripts_paths) or not options_storage.continue_mode: - shutil.copyfile(os.path.join(latest, "transcripts.paths"), cfg.result_transcripts_paths) - else: - if os.path.isfile(os.path.join(latest, "final_contigs.fasta")): - if not os.path.isfile(cfg.result_contigs) or not options_storage.continue_mode: - shutil.copyfile(os.path.join(latest, "final_contigs.fasta"), cfg.result_contigs) - if os.path.isfile(os.path.join(latest, "first_pe_contigs.fasta")): - result_first_pe_contigs = os.path.join(os.path.dirname(cfg.result_contigs), "first_pe_contigs.fasta") - if not os.path.isfile(result_first_pe_contigs) or not options_storage.continue_mode: - shutil.copyfile(os.path.join(latest, "first_pe_contigs.fasta"), result_first_pe_contigs) - if cfg.rr_enable: - if os.path.isfile(os.path.join(latest, "scaffolds.fasta")): - if not os.path.isfile(cfg.result_scaffolds) or not options_storage.continue_mode: - shutil.copyfile(os.path.join(latest, "scaffolds.fasta"), cfg.result_scaffolds) - if os.path.isfile(os.path.join(latest, "scaffolds.paths")): - if not os.path.isfile(cfg.result_scaffolds_paths) or not options_storage.continue_mode: - shutil.copyfile(os.path.join(latest, "scaffolds.paths"), cfg.result_scaffolds_paths) - if os.path.isfile(os.path.join(latest, "assembly_graph.gfa")): - if not os.path.isfile(cfg.result_graph_gfa) or not options_storage.continue_mode: - shutil.copyfile(os.path.join(latest, "assembly_graph.gfa"), cfg.result_graph_gfa) - if os.path.isfile(os.path.join(latest, "assembly_graph.fastg")): - if not os.path.isfile(cfg.result_graph) or not options_storage.continue_mode: - shutil.copyfile(os.path.join(latest, "assembly_graph.fastg"), cfg.result_graph) - 
if os.path.isfile(os.path.join(latest, "final_contigs.paths")): - if not os.path.isfile(cfg.result_contigs_paths) or not options_storage.continue_mode: - shutil.copyfile(os.path.join(latest, "final_contigs.paths"), cfg.result_contigs_paths) - - - if cfg.developer_mode: - # saves - saves_link = os.path.join(os.path.dirname(cfg.result_contigs), "saves") - if os.path.lexists(saves_link): # exists returns False for broken links! lexists return True - os.remove(saves_link) - os.symlink(os.path.join(latest, "saves"), saves_link) - - if os.path.isdir(bin_reads_dir): - shutil.rmtree(bin_reads_dir) - if os.path.isdir(cfg.tmp_dir): - shutil.rmtree(cfg.tmp_dir) - - return used_K diff --git a/src/SPAdes-3.14.0-Linux/bin/cds-mapping-stats b/src/SPAdes-3.14.0-Linux/bin/cds-mapping-stats new file mode 100755 index 0000000..920bec1 Binary files /dev/null and b/src/SPAdes-3.14.0-Linux/bin/cds-mapping-stats differ diff --git a/src/SPAdes-3.14.0-Linux/bin/cds-subgraphs b/src/SPAdes-3.14.0-Linux/bin/cds-subgraphs new file mode 100755 index 0000000..7d895f1 Binary files /dev/null and b/src/SPAdes-3.14.0-Linux/bin/cds-subgraphs differ diff --git a/src/SPAdes-3.14.0-Linux/bin/mag-improve b/src/SPAdes-3.14.0-Linux/bin/mag-improve new file mode 100755 index 0000000..9f5e510 Binary files /dev/null and b/src/SPAdes-3.14.0-Linux/bin/mag-improve differ diff --git a/src/SPAdes-3.10.1-Linux/bin/metaspades.py b/src/SPAdes-3.14.0-Linux/bin/metaspades.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/bin/metaspades.py rename to src/SPAdes-3.14.0-Linux/bin/metaspades.py diff --git a/src/SPAdes-3.10.1-Linux/bin/plasmidspades.py b/src/SPAdes-3.14.0-Linux/bin/plasmidspades.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/bin/plasmidspades.py rename to src/SPAdes-3.14.0-Linux/bin/plasmidspades.py diff --git a/src/SPAdes-3.10.1-Linux/bin/rnaspades.py b/src/SPAdes-3.14.0-Linux/bin/rnaspades.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/bin/rnaspades.py rename to src/SPAdes-3.14.0-Linux/bin/rnaspades.py diff --git a/src/SPAdes-3.14.0-Linux/bin/spades-bwa b/src/SPAdes-3.14.0-Linux/bin/spades-bwa new file mode 100755 index 0000000..77b6113 Binary files /dev/null and b/src/SPAdes-3.14.0-Linux/bin/spades-bwa differ diff --git a/src/SPAdes-3.14.0-Linux/bin/spades-convert-bin-to-fasta b/src/SPAdes-3.14.0-Linux/bin/spades-convert-bin-to-fasta new file mode 100755 index 0000000..1282753 Binary files /dev/null and b/src/SPAdes-3.14.0-Linux/bin/spades-convert-bin-to-fasta differ diff --git a/src/SPAdes-3.14.0-Linux/bin/spades-core b/src/SPAdes-3.14.0-Linux/bin/spades-core new file mode 100755 index 0000000..cd80a86 Binary files /dev/null and b/src/SPAdes-3.14.0-Linux/bin/spades-core differ diff --git a/src/SPAdes-3.14.0-Linux/bin/spades-corrector-core b/src/SPAdes-3.14.0-Linux/bin/spades-corrector-core new file mode 100755 index 0000000..ab75171 Binary files /dev/null and b/src/SPAdes-3.14.0-Linux/bin/spades-corrector-core differ diff --git a/src/SPAdes-3.14.0-Linux/bin/spades-gbuilder b/src/SPAdes-3.14.0-Linux/bin/spades-gbuilder new file mode 100755 index 0000000..1064402 Binary files /dev/null and b/src/SPAdes-3.14.0-Linux/bin/spades-gbuilder differ diff --git a/src/SPAdes-3.14.0-Linux/bin/spades-gmapper b/src/SPAdes-3.14.0-Linux/bin/spades-gmapper new file mode 100755 index 0000000..9b57e45 Binary files /dev/null and b/src/SPAdes-3.14.0-Linux/bin/spades-gmapper differ diff --git a/src/SPAdes-3.14.0-Linux/bin/spades-gsimplifier b/src/SPAdes-3.14.0-Linux/bin/spades-gsimplifier new file mode 100755 
index 0000000..30dcaf3 Binary files /dev/null and b/src/SPAdes-3.14.0-Linux/bin/spades-gsimplifier differ diff --git a/src/SPAdes-3.14.0-Linux/bin/spades-hammer b/src/SPAdes-3.14.0-Linux/bin/spades-hammer new file mode 100755 index 0000000..0b2ce3a Binary files /dev/null and b/src/SPAdes-3.14.0-Linux/bin/spades-hammer differ diff --git a/src/SPAdes-3.14.0-Linux/bin/spades-ionhammer b/src/SPAdes-3.14.0-Linux/bin/spades-ionhammer new file mode 100755 index 0000000..5d45572 Binary files /dev/null and b/src/SPAdes-3.14.0-Linux/bin/spades-ionhammer differ diff --git a/src/SPAdes-3.14.0-Linux/bin/spades-kmer-estimating b/src/SPAdes-3.14.0-Linux/bin/spades-kmer-estimating new file mode 100755 index 0000000..3faa71d Binary files /dev/null and b/src/SPAdes-3.14.0-Linux/bin/spades-kmer-estimating differ diff --git a/src/SPAdes-3.14.0-Linux/bin/spades-kmercount b/src/SPAdes-3.14.0-Linux/bin/spades-kmercount new file mode 100755 index 0000000..0229d0f Binary files /dev/null and b/src/SPAdes-3.14.0-Linux/bin/spades-kmercount differ diff --git a/src/SPAdes-3.14.0-Linux/bin/spades-read-filter b/src/SPAdes-3.14.0-Linux/bin/spades-read-filter new file mode 100755 index 0000000..d0b2341 Binary files /dev/null and b/src/SPAdes-3.14.0-Linux/bin/spades-read-filter differ diff --git a/src/SPAdes-3.14.0-Linux/bin/spades-truseq-scfcorrection b/src/SPAdes-3.14.0-Linux/bin/spades-truseq-scfcorrection new file mode 100755 index 0000000..09ebbd3 Binary files /dev/null and b/src/SPAdes-3.14.0-Linux/bin/spades-truseq-scfcorrection differ diff --git a/src/SPAdes-3.14.0-Linux/bin/spades.py b/src/SPAdes-3.14.0-Linux/bin/spades.py new file mode 100755 index 0000000..ba555e9 --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/bin/spades.py @@ -0,0 +1,639 @@ +#!/usr/bin/env python + +############################################################################ +# Copyright (c) 2015-2019 Saint Petersburg State University +# Copyright (c) 2011-2014 Saint Petersburg Academic University +# All Rights Reserved +# See file LICENSE for details. 
+############################################################################ + +import logging +import os +import shutil +import platform +import sys +from site import addsitedir + +import spades_init + +spades_init.init() +spades_home = spades_init.spades_home +bin_home = spades_init.bin_home +python_modules_home = spades_init.python_modules_home +ext_python_modules_home = spades_init.ext_python_modules_home +spades_version = spades_init.spades_version + +import support + +support.check_python_version() + +addsitedir(ext_python_modules_home) +if sys.version.startswith("2."): + import pyyaml2 as pyyaml +elif sys.version.startswith("3."): + import pyyaml3 as pyyaml +import options_storage +options_storage.spades_version = spades_version + +import options_parser +from stages.pipeline import Pipeline +import executor_local +import executor_save_yaml + +def print_used_values(cfg, log): + def print_value(cfg, section, param, pretty_param="", margin=" "): + if not pretty_param: + pretty_param = param.capitalize().replace('_', ' ') + line = margin + pretty_param + if param in cfg[section].__dict__: + line += ": " + str(cfg[section].__dict__[param]) + else: + if "offset" in param: + line += " will be auto-detected" + log.info(line) + + log.info("") + + # system info + log.info("System information:") + try: + log.info(" SPAdes version: " + str(spades_version).strip()) + log.info(" Python version: " + ".".join(map(str, sys.version_info[0:3]))) + # for more details: '[' + str(sys.version_info) + ']' + log.info(" OS: " + platform.platform()) + # for more details: '[' + str(platform.uname()) + ']' + except Exception: + log.info(" Problem occurred when getting system information") + log.info("") + + # main + print_value(cfg, "common", "output_dir", "", "") + if ("error_correction" in cfg) and (not "assembly" in cfg): + log.info("Mode: ONLY read error correction (without assembling)") + elif (not "error_correction" in cfg) and ("assembly" in cfg): + log.info("Mode: ONLY assembling (without read error correction)") + else: + log.info("Mode: read error correction and assembling") + if ("common" in cfg) and ("developer_mode" in cfg["common"].__dict__): + if cfg["common"].developer_mode: + log.info("Debug mode is turned ON") + else: + log.info("Debug mode is turned OFF") + log.info("") + + # dataset + if "dataset" in cfg: + log.info("Dataset parameters:") + + if options_storage.args.iontorrent: + log.info(" IonTorrent data") + if options_storage.args.bio: + log.info(" BiosyntheticSPAdes mode") + if options_storage.args.meta: + log.info(" Metagenomic mode") + elif options_storage.args.large_genome: + log.info(" Large genome mode") + elif options_storage.args.truseq_mode: + log.info(" Illumina TruSeq mode") + elif options_storage.args.isolate: + log.info(" Isolate mode") + elif options_storage.args.rna: + log.info(" RNA-seq mode") + elif options_storage.args.single_cell: + log.info(" Single-cell mode") + else: + log.info(" Standard mode") + log.info(" For multi-cell/isolate data we recommend to use '--isolate' option;" \ + " for single-cell MDA data use '--sc';" \ + " for metagenomic data use '--meta';" \ + " for RNA-Seq use '--rna'.") + + log.info(" Reads:") + dataset_data = pyyaml.load(open(cfg["dataset"].yaml_filename)) + dataset_data = support.relative2abs_paths(dataset_data, os.path.dirname(cfg["dataset"].yaml_filename)) + support.pretty_print_reads(dataset_data, log) + + # error correction + if "error_correction" in cfg: + log.info("Read error correction parameters:") + print_value(cfg, 
"error_correction", "max_iterations", "Iterations") + print_value(cfg, "error_correction", "qvoffset", "PHRED offset") + + if cfg["error_correction"].gzip_output: + log.info(" Corrected reads will be compressed") + else: + log.info(" Corrected reads will NOT be compressed") + + # assembly + if "assembly" in cfg: + log.info("Assembly parameters:") + if options_storage.auto_K_allowed(): + log.info(" k: automatic selection based on read length") + else: + print_value(cfg, "assembly", "iterative_K", "k") + if options_storage.args.plasmid: + log.info(" Plasmid mode is turned ON") + if cfg["assembly"].disable_rr: + log.info(" Repeat resolution is DISABLED") + else: + log.info(" Repeat resolution is enabled") + if options_storage.args.careful: + log.info(" Mismatch careful mode is turned ON") + else: + log.info(" Mismatch careful mode is turned OFF") + if "mismatch_corrector" in cfg: + log.info(" MismatchCorrector will be used") + else: + log.info(" MismatchCorrector will be SKIPPED") + if cfg["assembly"].cov_cutoff == "off": + log.info(" Coverage cutoff is turned OFF") + elif cfg["assembly"].cov_cutoff == "auto": + log.info(" Coverage cutoff is turned ON and threshold will be auto-detected") + else: + log.info(" Coverage cutoff is turned ON and threshold is %f" % cfg["assembly"].cov_cutoff) + + log.info("Other parameters:") + print_value(cfg, "common", "tmp_dir", "Dir for temp files") + print_value(cfg, "common", "max_threads", "Threads") + print_value(cfg, "common", "max_memory", "Memory limit (in Gb)", " ") + log.info("") + + +def create_logger(): + log = logging.getLogger("spades") + log.setLevel(logging.DEBUG) + + + console = logging.StreamHandler(sys.stdout) + console.setFormatter(logging.Formatter("%(message)s")) + console.setLevel(logging.DEBUG) + log.addHandler(console) + return log + + +def check_cfg_for_partial_run(cfg, partial_run_type="restart-from"): # restart-from ot stop-after + if partial_run_type == "restart-from": + check_point = options_storage.args.restart_from + action = "restart from" + verb = "was" + elif partial_run_type == "stop-after": + check_point = options_storage.args.stop_after + action = "stop after" + verb = "is" + else: + return + + if check_point == "ec" and ("error_correction" not in cfg): + support.error( + "failed to %s 'read error correction' ('%s') because this stage %s not specified!" % (action, check_point, verb)) + if check_point == "mc" and ("mismatch_corrector" not in cfg): + support.error( + "failed to %s 'mismatch correction' ('%s') because this stage %s not specified!" % (action, check_point, verb)) + if check_point == "as" or check_point.startswith('k'): + if "assembly" not in cfg: + support.error( + "failed to %s 'assembling' ('%s') because this stage %s not specified!" % (action, check_point, verb)) + +def get_options_from_params(params_filename, running_script): + command_line = None + options = None + prev_running_script = None + if not os.path.isfile(params_filename): + return command_line, options, prev_running_script, \ + "failed to parse command line of the previous run (%s not found)!" % params_filename + + with open(params_filename) as params: + command_line = params.readline().strip() + spades_prev_version = None + for line in params: + if "SPAdes version:" in line: + spades_prev_version = line.split("SPAdes version:")[1] + break + + if spades_prev_version is None: + return command_line, options, prev_running_script, \ + "failed to parse SPAdes version of the previous run!" 
+ if spades_prev_version.strip() != spades_version.strip(): + return command_line, options, prev_running_script, \ + "SPAdes version of the previous run (%s) is not equal to the current version of SPAdes (%s)!" \ + % (spades_prev_version.strip(), spades_version.strip()) + if "Command line: " not in command_line or '\t' not in command_line: + return command_line, options, prev_running_script, "failed to parse executable script of the previous run!" + options = command_line.split('\t')[1:] + prev_running_script = command_line.split('\t')[0][len("Command line: "):] + prev_running_script = os.path.basename(prev_running_script) + running_script = os.path.basename(running_script) + # we cannot restart/continue spades.py run with metaspades.py/rnaspades.py/etc and vice versa + if prev_running_script != running_script: + message = "executable script of the previous run (%s) is not equal " \ + "to the current executable script (%s)!" % prev_running_script, running_script + return command_line, options, prev_running_script, message + return command_line, options, prev_running_script, "" + + +# parse options and safe all parameters to cfg +def parse_args(args, log): + options, cfg, dataset_data = options_parser.parse_args(log, bin_home, spades_home, + secondary_filling=False, restart_from=False) + + command_line = "" + + if options_storage.args.continue_mode: + restart_from = options_storage.args.restart_from + command_line, options, script_name, err_msg = get_options_from_params( + os.path.join(options_storage.args.output_dir, "params.txt"), + args[0]) + if err_msg: + support.error(err_msg + " Please restart from the beginning or specify another output directory.") + options, cfg, dataset_data = options_parser.parse_args(log, bin_home, spades_home, secondary_filling=True, + restart_from=(options_storage.args.restart_from is not None), + options=options) + + options_storage.args.continue_mode = True + options_storage.args.restart_from = restart_from + + if options_storage.args.restart_from: + check_cfg_for_partial_run(cfg, partial_run_type="restart-from") + + if options_storage.args.stop_after: + check_cfg_for_partial_run(cfg, partial_run_type="stop-after") + + support.check_single_reads_in_options(log) + return cfg, dataset_data, command_line + + +def add_file_to_log(cfg, log): + log_filename = os.path.join(cfg["common"].output_dir, "spades.log") + if options_storage.args.continue_mode: + log_handler = logging.FileHandler(log_filename, mode='a') + else: + log_handler = logging.FileHandler(log_filename, mode='w') + log.addHandler(log_handler) + return log_filename, log_handler + + +def get_restart_from_command_line(args): + updated_params = "" + for i in range(1, len(args)): + if not args[i].startswith("-o") and not args[i].startswith("--restart-from") and \ + args[i - 1] != "-o" and args[i - 1] != "--restart-from": + updated_params += "\t" + args[i] + + updated_params = updated_params.strip() + restart_from_update_message = "Restart-from=" + options_storage.args.restart_from + "\n" + restart_from_update_message += "with updated parameters: " + updated_params + return updated_params, restart_from_update_message + + +def get_command_line(args): + command = "" + for v in args: + # substituting relative paths with absolute ones (read paths, output dir path, etc) + v, prefix = support.get_option_prefix(v) + if v in options_storage.dict_of_rel2abs.keys(): + v = options_storage.dict_of_rel2abs[v] + if prefix: + command += prefix + ":" + command += v + "\t" + return command + + +def print_params(log, 
log_filename, command_line, args, cfg): + if options_storage.args.continue_mode: + log.info("\n======= SPAdes pipeline continued. Log can be found here: " + log_filename + "\n") + log.info("Restored from " + command_line) + log.info("") + + params_filename = os.path.join(cfg["common"].output_dir, "params.txt") + params_handler = logging.FileHandler(params_filename, mode='w') + log.addHandler(params_handler) + + if not options_storage.args.continue_mode: + log.info("Command line: " + get_command_line(args)) + elif options_storage.args.restart_from: + update_params, restart_from_update_message = get_restart_from_command_line(args) + command_line += "\t" + update_params + log.info(command_line) + log.info(restart_from_update_message) + else: + log.info(command_line) + + + print_used_values(cfg, log) + log.removeHandler(params_handler) + + +def clear_configs(cfg, log, command_before_restart_from, stage_id_before_restart_from): + def matches_with_restart_from_arg(stage, restart_from_arg): + return stage["short_name"].startswith(restart_from_arg.split(":")[0]) + + spades_commands_fpath = os.path.join(cfg["common"].output_dir, "run_spades.yaml") + with open(spades_commands_fpath) as stream: + old_pipeline = pyyaml.load(stream) + + restart_from_stage_id = None + for num in range(len(old_pipeline)): + stage = old_pipeline[num] + if matches_with_restart_from_arg(stage, options_storage.args.restart_from): + restart_from_stage_id = num + break + + if command_before_restart_from is not None and \ + old_pipeline[stage_id_before_restart_from]["short_name"] != command_before_restart_from.short_name: + support.error("new and old pipelines have difference before %s" % options_storage.args.restart_from, log) + + if command_before_restart_from is None: + first_del = 0 + else: + first_del = stage_id_before_restart_from + 1 + + if restart_from_stage_id is not None: + stage_filename = options_storage.get_stage_filename(restart_from_stage_id, old_pipeline[restart_from_stage_id]["short_name"]) + if os.path.isfile(stage_filename): + os.remove(stage_filename) + + for delete_id in range(first_del, len(old_pipeline)): + stage_filename = options_storage.get_stage_filename(delete_id, old_pipeline[delete_id]["short_name"]) + if os.path.isfile(stage_filename): + os.remove(stage_filename) + + cfg_dir = old_pipeline[delete_id]["config_dir"] + if cfg_dir != "" and os.path.isdir(os.path.join(cfg["common"].output_dir, cfg_dir)): + shutil.rmtree(os.path.join(cfg["common"].output_dir, cfg_dir)) + + +def get_first_incomplete_command(filename): + with open(filename) as stream: + old_pipeline = pyyaml.load(stream) + + first_incomplete_stage_id = 0 + while first_incomplete_stage_id < len(old_pipeline): + stage_filename = options_storage.get_stage_filename(first_incomplete_stage_id, old_pipeline[first_incomplete_stage_id]["short_name"]) + if not os.path.isfile(stage_filename): + return old_pipeline[first_incomplete_stage_id] + first_incomplete_stage_id += 1 + + return None + + +def get_command_and_stage_id_before_restart_from(draft_commands, cfg, log): + restart_from_stage_name = options_storage.args.restart_from.split(":")[0] + + if options_storage.args.restart_from == options_storage.LAST_STAGE: + last_command = get_first_incomplete_command(os.path.join(get_stage.cfg["common"].output_dir, "run_spades.yaml")) + if last_command is None: + restart_from_stage_name = draft_commands[-1].short_name + else: + restart_from_stage_name = last_command["short_name"] + + restart_from_stage_id = None + for num in range(len(draft_commands)): + 
stage = draft_commands[num] + if stage.short_name.startswith(restart_from_stage_name): + restart_from_stage_id = num + break + + if restart_from_stage_id is None: + support.error( + "failed to restart from %s because this stage was not specified!" % options_storage.args.restart_from, + log) + + if ":" in options_storage.args.restart_from or options_storage.args.restart_from == options_storage.LAST_STAGE: + return draft_commands[restart_from_stage_id], restart_from_stage_id + + if restart_from_stage_id > 0: + stage_filename = options_storage.get_stage_filename(restart_from_stage_id - 1, draft_commands[restart_from_stage_id - 1].short_name) + if not os.path.isfile(stage_filename): + support.error( + "cannot restart from stage %s: previous stage was not complete." % options_storage.args.restart_from, + log) + return draft_commands[restart_from_stage_id - 1], restart_from_stage_id - 1 + return None, None + + +def print_info_about_output_files(cfg, log, output_files): + def check_and_report_output_file(output_file_key, message_prefix_text): + if os.path.isfile(output_files[output_file_key]): + message = message_prefix_text + support.process_spaces(output_files[output_file_key]) + log.info(message) + + if "error_correction" in cfg and os.path.isdir( + os.path.dirname(output_files["corrected_dataset_yaml_filename"])): + log.info(" * Corrected reads are in " + support.process_spaces( + os.path.dirname(output_files["corrected_dataset_yaml_filename"]) + "/")) + + if "assembly" in cfg: + check_and_report_output_file("result_contigs_filename", " * Assembled contigs are in ") + + if options_storage.args.bio: + check_and_report_output_file("result_domain_graph_filename", " * Domain graph is in ") + check_and_report_output_file("result_gene_clusters_filename", " * Gene cluster sequences are in ") + check_and_report_output_file("result_bgc_stats_filename", " * BGC cluster statistics ") + + if options_storage.args.rna: + check_and_report_output_file("result_transcripts_filename", " * Assembled transcripts are in ") + check_and_report_output_file("result_transcripts_paths_filename", + " * Paths in the assembly graph corresponding to the transcripts are in ") + + for filtering_type in options_storage.filtering_types: + result_filtered_transcripts_filename = os.path.join(cfg["common"].output_dir, + filtering_type + "_filtered_" + + options_storage.transcripts_name) + if os.path.isfile(result_filtered_transcripts_filename): + message = " * " + filtering_type.capitalize() + " filtered transcripts are in " + \ + support.process_spaces(result_filtered_transcripts_filename) + log.info(message) + else: + check_and_report_output_file("result_scaffolds_filename", " * Assembled scaffolds are in ") + check_and_report_output_file("result_contigs_paths_filename", + " * Paths in the assembly graph corresponding to the contigs are in ") + check_and_report_output_file("result_scaffolds_paths_filename", + " * Paths in the assembly graph corresponding to the scaffolds are in ") + + check_and_report_output_file("result_assembly_graph_filename", " * Assembly graph is in ") + check_and_report_output_file("result_assembly_graph_filename_gfa", " * Assembly graph in GFA format is in ") + + +def get_output_files(cfg): + output_files = dict() + output_files["corrected_dataset_yaml_filename"] = "" + output_files["result_contigs_filename"] = os.path.join(cfg["common"].output_dir, options_storage.contigs_name) + output_files["result_scaffolds_filename"] = os.path.join(cfg["common"].output_dir, options_storage.scaffolds_name) + 
output_files["result_assembly_graph_filename"] = os.path.join(cfg["common"].output_dir, + options_storage.assembly_graph_name) + output_files["result_assembly_graph_filename_gfa"] = os.path.join(cfg["common"].output_dir, + options_storage.assembly_graph_name_gfa) + output_files["result_contigs_paths_filename"] = os.path.join(cfg["common"].output_dir, + options_storage.contigs_paths) + output_files["result_scaffolds_paths_filename"] = os.path.join(cfg["common"].output_dir, + options_storage.scaffolds_paths) + output_files["result_transcripts_filename"] = os.path.join(cfg["common"].output_dir, + options_storage.transcripts_name) + output_files["result_transcripts_paths_filename"] = os.path.join(cfg["common"].output_dir, + options_storage.transcripts_paths) + output_files["result_bgc_stats_filename"] = os.path.join(cfg["common"].output_dir, options_storage.bgc_stats_name) + output_files["result_domain_graph_filename"] = os.path.join(cfg["common"].output_dir, options_storage.domain_graph_name) + output_files["result_gene_clusters_filename"] = os.path.join(cfg["common"].output_dir, options_storage.gene_clusters_name) + output_files["truseq_long_reads_file_base"] = os.path.join(cfg["common"].output_dir, "truseq_long_reads") + output_files["truseq_long_reads_file"] = output_files["truseq_long_reads_file_base"] + ".fasta" + output_files["misc_dir"] = os.path.join(cfg["common"].output_dir, "misc") + ### if mismatch correction is enabled then result contigs are copied to misc directory + output_files["assembled_contigs_filename"] = os.path.join(output_files["misc_dir"], "assembled_contigs.fasta") + output_files["assembled_scaffolds_filename"] = os.path.join(output_files["misc_dir"], "assembled_scaffolds.fasta") + return output_files + + +def get_stage(iteration_name): + if not options_storage.args.continue_mode: + return options_storage.BASE_STAGE + + if options_storage.args.restart_from is not None and \ + options_storage.args.restart_from != options_storage.LAST_STAGE: + if ":" in options_storage.args.restart_from and \ + iteration_name == options_storage.args.restart_from.split(":")[0]: + return options_storage.args.restart_from.split(":")[-1] + else: + return options_storage.BASE_STAGE + + if get_stage.restart_stage is None: + last_command = get_first_incomplete_command(os.path.join(get_stage.cfg["common"].output_dir, "run_spades.yaml")) + + if last_command is not None: + get_stage.restart_stage = last_command["short_name"] + else: + get_stage.restart_stage = "finish" + + if iteration_name == get_stage.restart_stage: + return options_storage.LAST_STAGE + else: + return options_storage.BASE_STAGE + + +def build_pipeline(pipeline, cfg, output_files, tmp_configs_dir, dataset_data, log, bin_home, + ext_python_modules_home, python_modules_home): + from stages import error_correction_stage + from stages import spades_stage + from stages import postprocessing_stage + from stages import correction_stage + from stages import check_test_stage + from stages import breaking_scaffolds_stage + from stages import preprocess_reads_stage + from stages import terminating_stage + + preprocess_reads_stage.add_to_pipeline(pipeline, cfg, output_files, tmp_configs_dir, dataset_data, log, bin_home, + ext_python_modules_home, python_modules_home) + error_correction_stage.add_to_pipeline(pipeline, cfg, output_files, tmp_configs_dir, dataset_data, log, bin_home, + ext_python_modules_home, python_modules_home) + + get_stage.cfg, get_stage.restart_stage = cfg, None + spades_stage.add_to_pipeline(pipeline, get_stage, cfg, 
output_files, tmp_configs_dir, dataset_data, log, bin_home, + ext_python_modules_home, python_modules_home) + postprocessing_stage.add_to_pipeline(pipeline, cfg, output_files, tmp_configs_dir, dataset_data, log, bin_home, + ext_python_modules_home, python_modules_home) + correction_stage.add_to_pipeline(pipeline, cfg, output_files, tmp_configs_dir, dataset_data, log, bin_home, + ext_python_modules_home, python_modules_home) + check_test_stage.add_to_pipeline(pipeline, cfg, output_files, tmp_configs_dir, dataset_data, log, bin_home, + ext_python_modules_home, python_modules_home) + breaking_scaffolds_stage.add_to_pipeline(pipeline, cfg, output_files, tmp_configs_dir, dataset_data, log, bin_home, + ext_python_modules_home, python_modules_home) + terminating_stage.add_to_pipeline(pipeline, cfg, output_files, tmp_configs_dir, dataset_data, log, bin_home, + ext_python_modules_home, python_modules_home) + + +def check_dir_is_empty(dir_name): + if dir_name is not None and \ + os.path.exists(dir_name) and \ + os.listdir(dir_name): + support.warning("output dir is not empty! Please, clean output directory before run.") + + +def init_parser(args): + if options_parser.is_first_run(): + options_storage.first_command_line = args + check_dir_is_empty(options_parser.get_output_dir_from_args()) + else: + command_line, options, script, err_msg = get_options_from_params( + os.path.join(options_parser.get_output_dir_from_args(), "params.txt"), + args[0]) + + if err_msg != "": + support.error(err_msg) + + options_storage.first_command_line = [script] + options + + +def main(args): + os.environ["LC_ALL"] = "C" + + init_parser(args) + + if len(args) == 1: + options_parser.usage(spades_version) + sys.exit(0) + + pipeline = Pipeline() + + log = create_logger() + cfg, dataset_data, command_line = parse_args(args, log) + log_filename, log_handler = add_file_to_log(cfg, log) + print_params(log, log_filename, command_line, args, cfg) + + if not options_storage.args.continue_mode: + log.info("\n======= SPAdes pipeline started. 
Log can be found here: " + log_filename + "\n") + + support.check_binaries(bin_home, log) + try: + output_files = get_output_files(cfg) + tmp_configs_dir = os.path.join(cfg["common"].output_dir, "configs") + + build_pipeline(pipeline, cfg, output_files, tmp_configs_dir, dataset_data, log, + bin_home, ext_python_modules_home, python_modules_home) + + if options_storage.args.restart_from: + draft_commands = pipeline.get_commands(cfg) + command_before_restart_from, stage_id_before_restart_from = \ + get_command_and_stage_id_before_restart_from(draft_commands, cfg, log) + clear_configs(cfg, log, command_before_restart_from, stage_id_before_restart_from) + + pipeline.generate_configs(cfg, spades_home, tmp_configs_dir) + commands = pipeline.get_commands(cfg) + + executor = executor_save_yaml.Executor(log) + executor.execute(commands) + + if not options_storage.args.only_generate_config: + executor = executor_local.Executor(log) + executor.execute(commands) + print_info_about_output_files(cfg, log, output_files) + + if not support.log_warnings(log): + log.info("\n======= SPAdes pipeline finished.") + + except Exception: + exc_type, exc_value, _ = sys.exc_info() + if exc_type == SystemExit: + sys.exit(exc_value) + else: + import errno + if exc_type == OSError and exc_value.errno == errno.ENOEXEC: # Exec format error + support.error("it looks like you are using SPAdes binaries for another platform.\n" + + support.get_spades_binaries_info_message()) + else: + log.exception(exc_value) + support.error("exception caught: %s" % exc_type, log) + except BaseException: # since python 2.5 system-exiting exceptions (e.g. KeyboardInterrupt) are derived from BaseException + exc_type, exc_value, _ = sys.exc_info() + if exc_type == SystemExit: + sys.exit(exc_value) + else: + log.exception(exc_value) + support.error("exception caught: %s" % exc_type, log) + finally: + log.info("\nSPAdes log can be found here: %s" % log_filename) + log.info("") + log.info("Thank you for using SPAdes!") + log.removeHandler(log_handler) + + +if __name__ == "__main__": + main(sys.argv) \ No newline at end of file diff --git a/src/SPAdes-3.10.1-Linux/bin/spades_init.py b/src/SPAdes-3.14.0-Linux/bin/spades_init.py similarity index 52% rename from src/SPAdes-3.10.1-Linux/bin/spades_init.py rename to src/SPAdes-3.14.0-Linux/bin/spades_init.py index 4baebdd..0bcd4e5 100644 --- a/src/SPAdes-3.10.1-Linux/bin/spades_init.py +++ b/src/SPAdes-3.14.0-Linux/bin/spades_init.py @@ -1,7 +1,7 @@ #!/usr/bin/env python ############################################################################ -# Copyright (c) 2015 Saint Petersburg State University +# Copyright (c) 2015-2019 Saint Petersburg State University # Copyright (c) 2011-2014 Saint Petersburg Academic University # All Rights Reserved # See file LICENSE for details. 
@@ -11,14 +11,14 @@ import sys from os.path import abspath, dirname, realpath, join, isfile -source_dirs = ["", "truspades", "common"] +source_dirs = ["", "truspades", "common", "executors", "scripts"] # developers configuration spades_home = abspath(dirname(realpath(__file__))) -bin_home = join(spades_home, 'bin') -python_modules_home = join(spades_home, 'src') -ext_python_modules_home = join(spades_home, 'ext', 'src', 'python_libs') -spades_version = '' +bin_home = join(spades_home, "bin") +python_modules_home = join(spades_home, "src") +ext_python_modules_home = join(spades_home, "ext", "src", "python_libs") +spades_version = "" def init(): @@ -29,19 +29,19 @@ def init(): global ext_python_modules_home # users configuration (spades_init.py and spades binary are in the same directory) - if isfile(os.path.join(spades_home, 'spades')): + if isfile(os.path.join(spades_home, "spades-core")): install_prefix = dirname(spades_home) - bin_home = join(install_prefix, 'bin') - spades_home = join(install_prefix, 'share', 'spades') + bin_home = join(install_prefix, "bin") + spades_home = join(install_prefix, "share", "spades") python_modules_home = spades_home ext_python_modules_home = spades_home for dir in source_dirs: - sys.path.append(join(python_modules_home, 'spades_pipeline', dir)) + sys.path.append(join(python_modules_home, "spades_pipeline", dir)) - spades_version = open(join(spades_home, 'VERSION'), 'r').readline().strip() + spades_version = open(join(spades_home, "VERSION"), 'r').readline().strip() -if __name__ == '__main__': - spades_py_path = join(dirname(realpath(__file__)), 'spades.py') - sys.stderr.write('Please use ' + spades_py_path + ' for running SPAdes genome assembler\n') \ No newline at end of file +if __name__ == "__main__": + spades_py_path = join(dirname(realpath(__file__)), "spades.py") + sys.stderr.write("Please use " + spades_py_path + " for running SPAdes genome assembler\n") diff --git a/src/SPAdes-3.14.0-Linux/bin/spaligner b/src/SPAdes-3.14.0-Linux/bin/spaligner new file mode 100755 index 0000000..9132ffe Binary files /dev/null and b/src/SPAdes-3.14.0-Linux/bin/spaligner differ diff --git a/src/SPAdes-3.10.1-Linux/bin/truspades.py b/src/SPAdes-3.14.0-Linux/bin/truspades.py similarity index 97% rename from src/SPAdes-3.10.1-Linux/bin/truspades.py rename to src/SPAdes-3.14.0-Linux/bin/truspades.py index dbca5d3..ecf4e41 100755 --- a/src/SPAdes-3.10.1-Linux/bin/truspades.py +++ b/src/SPAdes-3.14.0-Linux/bin/truspades.py @@ -17,11 +17,12 @@ spades_home = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) spades_version = spades_init.spades_version -import SeqIO # TODO: add to ext/scr/python_libs -import parallel_launcher +import support +from common import SeqIO # TODO: add to ext/scr/python_libs +from common import parallel_launcher +# the next modules are from spades_pipeline/truspades/ (can't write "from truspades import ..." 
since we are in truspades.py) +import reference_construction +import launch_options -import support +import barcode_extraction def generate_dataset(input_dirs, log): diff --git a/src/SPAdes-3.10.1-Linux/share/spades/GPLv2.txt b/src/SPAdes-3.14.0-Linux/share/spades/GPLv2.txt similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/GPLv2.txt rename to src/SPAdes-3.14.0-Linux/share/spades/GPLv2.txt diff --git a/src/SPAdes-3.10.1-Linux/share/spades/LICENSE b/src/SPAdes-3.14.0-Linux/share/spades/LICENSE similarity index 78% rename from src/SPAdes-3.10.1-Linux/share/spades/LICENSE rename to src/SPAdes-3.14.0-Linux/share/spades/LICENSE index 0438b8d..a26d33b 100644 --- a/src/SPAdes-3.10.1-Linux/share/spades/LICENSE +++ b/src/SPAdes-3.14.0-Linux/share/spades/LICENSE @@ -1,5 +1,5 @@ SPADES: SAINT-PETERSBURG GENOME ASSEMBLER -Copyright (c) 2015-2017 Saint Petersburg State University +Copyright (c) 2015-2019 Saint Petersburg State University Copyright (c) 2011-2014 Saint Petersburg Academic University SPAdes is free software; you can redistribute it and/or modify @@ -17,29 +17,41 @@ with this program; if not, write to the Free Software Foundation, Inc., ------------------------------------------------------------------------------- +SPAdes +Genome assembler for single-cell and isolates data sets +Version: see VERSION + +Developed in Center for Algorithmic Biotechnology, Institute of Translational Biomedicine, St. Petersburg State University. +Developed in Algorithmic Biology Lab of St. Petersburg Academic University of the Russian Academy of Sciences. + Current SPAdes contributors: Dmitry Antipov, - Anton Bankevich, - Yuriy Gorshkov, + Elena Bushmanova, Alexey Gurevich, Anton Korobeynikov, + Olga Kunyavskaya, Dmitriy Meleshko, Sergey Nurk, Andrey Prjibelski, - Yana Safonova, + Alexander Shlemov, + Ivan Tolstoganov, Alla Lapidus and Pavel Pevzner Also contributed: Max Alekseyev, + Anton Bankevich, Mikhail Dvorkin, + Vasisliy Ershov, + Yuriy Gorshkov, Alexander Kulikov, Valery Lesin, Sergey Nikolenko, Son Pham, Alexey Pyshkin, + Yana Safonova, Vladislav Saveliev, Alexander Sirotkin, Yakov Sirotkin, @@ -48,9 +60,10 @@ Also contributed: Irina Vasilinetc, Nikolay Vyahhi -Contacts: - http://cab.spbu.ru/software/spades/ - spades.support@cab.spbu.ru +Installation instructions and manual can be found on the website: +http://cab.spbu.ru/software/spades/ + +Address for communication: spades.support@cab.spbu.ru References: diff --git a/src/SPAdes-3.14.0-Linux/share/spades/README.md b/src/SPAdes-3.14.0-Linux/share/spades/README.md new file mode 100644 index 0000000..ad0026c --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/README.md @@ -0,0 +1,1209 @@ +__SPAdes 3.14.0 Manual__ + + +1. [About SPAdes](#sec1)<br>
+    1.1. [Supported data types](#sec1.1)<br>
+    1.2. [SPAdes pipeline](#sec1.2)<br>
+    1.3. [SPAdes performance](#sec1.3)<br>
+2. [Installation](#sec2)<br>
+    2.1. [Downloading SPAdes Linux binaries](#sec2.1)<br>
+    2.2. [Downloading SPAdes binaries for Mac](#sec2.2)<br>
+    2.3. [Downloading and compiling SPAdes source code](#sec2.3)<br>
+    2.4. [Verifying your installation](#sec2.4)<br>
+3. [Running SPAdes](#sec3)<br>
+    3.1. [SPAdes input](#sec3.1)<br>
+    3.2. [SPAdes command line options](#sec3.2)<br>
+    3.3. [Assembling IonTorrent reads](#sec3.3)<br>
+    3.4. [Assembling long Illumina paired reads (2x150 and 2x250)](#sec3.4)<br>
+    3.5. [SPAdes output](#sec3.5)<br>
+    3.6. [plasmidSPAdes output](#sec3.6)<br>
+    3.7. [biosyntheticSPAdes output](#sec3.7)<br>
+    3.8. [Assembly evaluation](#sec3.8)<br>
+4. [Stand-alone binaries released within SPAdes package](#sec4)<br>
+    4.1. [k-mer counting](#sec4.1)<br>
+    4.2. [k-mer coverage read filter](#sec4.2)<br>
+    4.3. [k-mer cardinality estimating](#sec4.3)<br>
+    4.4. [Graph construction](#sec4.4)<br>
+    4.5. [Long read to graph alignment](#sec4.5)<br>
+        4.5.1. [hybridSPAdes aligner](#sec4.5.1)<br>
+        4.5.2. [SPAligner](#sec4.5.2)<br>
+5. [Citation](#sec5)<br>
+6. [Feedback and bug reports](#sec6)<br>
+
+
+# About SPAdes
+
+SPAdes – St. Petersburg genome assembler – is an assembly toolkit containing various assembly pipelines. This manual will help you to install and run SPAdes. SPAdes version 3.14.0 was released under GPLv2 on December 27, 2019 and can be downloaded from [http://cab.spbu.ru/files/release3.14.0/](http://cab.spbu.ru/files/release3.14.0/).
+
+
+## Supported data types
+
+The current version of SPAdes works with Illumina or IonTorrent reads and is capable of providing hybrid assemblies using PacBio, Oxford Nanopore and Sanger reads. You can also provide additional contigs that will be used as long reads.
+
+Version 3.14.0 of SPAdes supports paired-end reads, mate-pairs and unpaired reads. SPAdes can take as input several paired-end and mate-pair libraries simultaneously. Note that SPAdes was initially designed for small genomes: it was tested on bacterial (both single-cell MDA and standard isolates), fungal and other small genomes. SPAdes is not intended for larger genomes (e.g. mammalian-size genomes); for such purposes you can use it at your own risk.
+
+If you have high-coverage data for a bacterial/viral isolate or a multi-cell organism, we highly recommend using the [`--isolate`](#isolate) option.
+
+SPAdes 3.14.0 includes the following additional pipelines:
+
+- metaSPAdes – a pipeline for metagenomic data sets (see [metaSPAdes options](#meta)).
+- plasmidSPAdes – a pipeline for extracting and assembling plasmids from WGS data sets (see [plasmidSPAdes options](#plasmid)).
+- rnaSPAdes – a *de novo* transcriptome assembler from RNA-Seq data (see [rnaSPAdes manual](assembler/rnaspades_manual.html)).
+- truSPAdes – a module for TruSeq barcode assembly (see [truSPAdes manual](assembler/truspades_manual.html)).
+- biosyntheticSPAdes – a module for biosynthetic gene cluster assembly with paired-end reads (see [biosyntheticSPAdes options](#biosynthetic)).
+
+In addition, we provide several stand-alone binaries with relatively simple command-line interfaces: [k-mer counting](#sec4.1) (`spades-kmercount`), [assembly graph construction](#sec4.4) (`spades-gbuilder`) and [long read to graph alignment](#sec4.5) (`spades-gmapper`). To learn the options of these tools, either run them without any parameters or read [this section](#sec4).
+
+
+## SPAdes pipeline
+
+SPAdes comes in several separate modules:
+
+- [BayesHammer](http://bioinf.spbau.ru/en/spades/bayeshammer) – read error correction tool for Illumina reads, which works well on both single-cell and standard data sets.
+- IonHammer – read error correction tool for IonTorrent data, which also works on both types of data.
+- SPAdes – iterative short-read genome assembly module; values of K are selected automatically based on the read length and data set type.
+- MismatchCorrector – a tool which improves mismatch and short indel rates in resulting contigs and scaffolds; this module uses the [BWA](http://bio-bwa.sourceforge.net) tool \[[Li H. and Durbin R., 2009](http://www.ncbi.nlm.nih.gov/pubmed/19451168)\]; MismatchCorrector is turned off by default, but we recommend turning it on (see [SPAdes options section](#correctoropt)).
+
+We recommend running SPAdes with BayesHammer/IonHammer to obtain high-quality assemblies. However, if you use your own read correction tool, it is possible to turn the error correction module off. It is also possible to use only the read error correction stage if you wish to use another assembler. See the [SPAdes options section](#pipelineopt).
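As a concrete illustration of the automatic K selection mentioned above, the sketch below mirrors the rule implemented in `update_k_mers_in_special_cases()` in the deleted `spades_logic.py` hunk earlier in this diff (applied only when automatic selection is allowed, i.e. no explicit `-k` list and standard multi-cell data). The default k-mer lists shown here are illustrative assumptions; the authoritative values live in `options_storage`.

```python
# Minimal sketch of SPAdes-style automatic k-mer selection, mirroring
# update_k_mers_in_special_cases() from the spades_logic.py hunk above.
# The default lists below are illustrative assumptions, not authoritative.
K_MERS_150 = [21, 33, 55, 77]            # assumed defaults for reads >= 150 bp
K_MERS_250 = [21, 33, 55, 77, 99, 127]   # assumed defaults for reads >= 250 bp

def pick_k_values(current_k_mers, read_length):
    """Return the k values an iterative assembly run would use."""
    if read_length >= 250:
        return K_MERS_250
    if read_length >= 150:
        return K_MERS_150
    if read_length <= max(current_k_mers):
        # Keep only k values strictly smaller than the read length.
        return [k for k in current_k_mers if k < read_length]
    return current_k_mers

print(pick_k_values([21, 33, 55], 100))  # -> [21, 33, 55]
print(pick_k_values([21, 33, 55], 50))   # -> [21, 33]
```

The thresholds (150 and 250) are taken from the deleted 3.10.1 pipeline code shown above; they illustrate the general idea of widening the k ladder as reads get longer.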
+
+
+## SPAdes performance
+
+In this section we give approximate data about SPAdes performance on two data sets:
+
+- [Standard isolate *E. coli*](http://spades.bioinf.spbau.ru/spades_test_datasets/ecoli_mc/); 6.2 Gb, 28M reads, 2x100bp, insert size ~ 215bp
+- [MDA single-cell *E. coli*](http://spades.bioinf.spbau.ru/spades_test_datasets/ecoli_sc/); 6.3 Gb, 29M reads, 2x100bp, insert size ~ 270bp
+
+We ran SPAdes with default parameters using 16 threads on a server with Intel Xeon 2.27GHz processors. BayesHammer runs in approximately half an hour and takes up to 8 Gb of RAM to perform read error correction on each data set. Assembly takes about 10 minutes for the *E. coli* isolate data set and 20 minutes for the *E. coli* single-cell data set. Both data sets require about 8 Gb of RAM (see notes below). MismatchCorrector runs for about 15 minutes on both data sets and requires less than 2 Gb of RAM. All modules also require additional disk space for storing results (corrected reads, contigs, etc) and temporary files. See the table below for more precise values.
+
+| Stage | *E. coli* isolate: Time | Peak RAM usage (Gb) | Additional disk space (Gb) | *E. coli* single-cell: Time | Peak RAM usage (Gb) | Additional disk space (Gb) |
+| --- | --- | --- | --- | --- | --- | --- |
+| BayesHammer | 24m | 7.8 | 8.5 | 25m | 7.7 | 8.6 |
+| SPAdes | 8m | 8.4 | 1.4 | 10m | 8.3 | 2.1 |
+| MismatchCorrector | 10m | 1.7 | 21.4 | 12m | 1.8 | 22.4 |
+| Whole pipeline | 42m | 8.4 | 23.9 | 47m | 8.3 | 25.1 |
+
+
+Notes:
+
+- Running SPAdes without preliminary read error correction (e.g. without BayesHammer or IonHammer) will likely require more time and memory.
+- Each module removes its temporary files as soon as it finishes.
+- SPAdes uses 512 Mb per thread for buffers, which results in higher memory consumption. If you set the memory limit manually, SPAdes will use smaller buffers and thus less RAM.
+- Performance statistics are given for SPAdes version 3.14.0.
+
+
+# Installation
+
+
+SPAdes requires a 64-bit Linux system or Mac OS and Python (supported versions are Python2: 2.4–2.7, and Python3: 3.2 and higher) to be pre-installed on it. To obtain SPAdes you can either download binaries or download the source code and compile it yourself.
+
+In case of successful installation the following files will be placed in the `bin` directory:
+
+- `spades.py` (main executable script)
+- `metaspades.py` (main executable script for [metaSPAdes](#meta))
+- `plasmidspades.py` (main executable script for [plasmidSPAdes](#plasmid))
+- `rnaspades.py` (main executable script for [rnaSPAdes](assembler/rnaspades_manual.html))
+- `truspades.py` (main executable script for [truSPAdes](assembler/truspades_manual.html))
+- `spades-core` (assembly module)
+- `spades-gbuilder` (standalone graph builder application)
+- `spades-gmapper` (standalone long read to graph aligner)
+- `spades-kmercount` (standalone k-mer counting application)
+- `spades-hammer` (read error correcting module for Illumina reads)
+- `spades-ionhammer` (read error correcting module for IonTorrent reads)
+- `spades-bwa` ([BWA](http://bio-bwa.sourceforge.net) alignment module which is required for mismatch correction)
+- `spades-corrector-core` (mismatch correction module)
+- `spades-truseq-scfcorrection` (executable used in truSPAdes pipeline)
+
+
+## Downloading SPAdes Linux binaries
+
+To download [SPAdes Linux binaries](http://cab.spbu.ru/files/release3.14.0/SPAdes-3.14.0-Linux.tar.gz) and extract them, go to the directory in which you wish SPAdes to be installed and run:
+
+``` bash
+
+    wget http://cab.spbu.ru/files/release3.14.0/SPAdes-3.14.0-Linux.tar.gz
+    tar -xzf SPAdes-3.14.0-Linux.tar.gz
+    cd SPAdes-3.14.0-Linux/bin/
+```
+
+In this case you do not need to run any installation scripts – SPAdes is ready to use. We also suggest adding the SPAdes installation directory to the `PATH` variable.
+
+Note that pre-built binaries do not work on new Linux kernels.
+
+
+## Downloading SPAdes binaries for Mac
+
+To obtain [SPAdes binaries for Mac](http://cab.spbu.ru/files/release3.14.0/SPAdes-3.14.0-Darwin.tar.gz), go to the directory in which you wish SPAdes to be installed and run:
+
+``` bash
+
+    curl http://cab.spbu.ru/files/release3.14.0/SPAdes-3.14.0-Darwin.tar.gz -o SPAdes-3.14.0-Darwin.tar.gz
+    tar -zxf SPAdes-3.14.0-Darwin.tar.gz
+    cd SPAdes-3.14.0-Darwin/bin/
+```
+
+Just as in Linux, SPAdes is ready to use and no further installation steps are required. We also suggest adding the SPAdes installation directory to the `PATH` variable.
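+
+For example, from the `bin/` directory you have just entered, one way to do this for the current shell session is the following sketch (add an equivalent line to your shell profile to make it permanent):
+
+``` bash
+
+    # make spades.py and the other executables callable from anywhere
+    export PATH="$(pwd):$PATH"
+```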
+
+
+## Downloading and compiling SPAdes source code
+
+If you wish to compile SPAdes yourself, you will need the following libraries to be pre-installed:
+
+- g++ (version 5.3.1 or higher)
+- cmake (version 2.8.12 or higher)
+- zlib
+- libbz2
+
+If you meet these requirements, you can download the [SPAdes source code](http://cab.spbu.ru/files/release3.14.0/SPAdes-3.14.0.tar.gz):
+
+``` bash
+
+    wget http://cab.spbu.ru/files/release3.14.0/SPAdes-3.14.0.tar.gz
+    tar -xzf SPAdes-3.14.0.tar.gz
+    cd SPAdes-3.14.0
+```
+
+and build it with the following script:
+
+``` bash
+
+    ./spades_compile.sh
+```
+
+SPAdes will be built in the directory `./bin`. If you wish to install SPAdes into another directory, you can specify the full path of the destination folder by running the following command in `bash` or `sh`:
+
+``` bash
+
+    PREFIX=<destination_dir> ./spades_compile.sh
+```
+
+for example:
+
+``` bash
+
+    PREFIX=/usr/local ./spades_compile.sh
+```
+
+which will install SPAdes into `/usr/local/bin`.
+
+After installation you will get the same files (listed above) in the `./bin` directory (or `<destination_dir>/bin` if you specified PREFIX). We also suggest adding the SPAdes installation directory to the `PATH` variable.
+
+
+## Verifying your installation
+
+For testing purposes, SPAdes comes with a toy data set (reads that align to the first 1000 bp of *E. coli*). To try SPAdes on this data set, run:
+
+``` bash
+
+    <installation_dir>/spades.py --test
+```
+
+If you added the SPAdes installation directory to the `PATH` variable, you can run:
+
+``` bash
+
+    spades.py --test
+```
+
+For simplicity, we further assume that the SPAdes installation directory is added to the `PATH` variable.
+
+If the installation is successful, you will find the following information at the end of the log:
+
+``` plain
+
+===== Assembling finished. Used k-mer sizes: 21, 33, 55
+
+ * Corrected reads are in spades_test/corrected/
+ * Assembled contigs are in spades_test/contigs.fasta
+ * Assembled scaffolds are in spades_test/scaffolds.fasta
+ * Assembly graph is in spades_test/assembly_graph.fastg
+ * Assembly graph in GFA format is in spades_test/assembly_graph.gfa
+ * Paths in the assembly graph corresponding to the contigs are in spades_test/contigs.paths
+ * Paths in the assembly graph corresponding to the scaffolds are in spades_test/scaffolds.paths
+
+======= SPAdes pipeline finished.
+
+========= TEST PASSED CORRECTLY.
+
+SPAdes log can be found here: spades_test/spades.log
+
+Thank you for using SPAdes!
+```
+
+
+# Running SPAdes
+
+
+## SPAdes input
+
+SPAdes takes as input paired-end reads, mate-pairs and single (unpaired) reads in FASTA and FASTQ. For IonTorrent data SPAdes also supports unpaired reads in unmapped BAM format (like the one produced by Torrent Server). However, in order to run read error correction, reads should be in FASTQ or BAM format. Sanger, Oxford Nanopore and PacBio CLR reads can be provided in both formats since SPAdes does not run error correction for these types of data.
+
+To run SPAdes 3.14.0 you need at least one library of the following types:
+
+- Illumina paired-end/high-quality mate-pairs/unpaired reads
+- IonTorrent paired-end/high-quality mate-pairs/unpaired reads
+- PacBio CCS reads
+
+Illumina and IonTorrent libraries should not be assembled together. All other types of input data are compatible. SPAdes should not be used if only PacBio CLR, Oxford Nanopore, Sanger reads or additional contigs are available.
+
+SPAdes supports mate-pair only assembly. However, we recommend using only high-quality mate-pair libraries in this case (e.g. ones that do not have a paired-end part).
+We tested the mate-pair only pipeline using Illumina Nextera mate-pairs. See more [here](#hqmp).
+
+The current version of SPAdes also supports Lucigen NxSeq® Long Mate Pair libraries, which always have forward-reverse orientation. If you wish to use Lucigen NxSeq® Long Mate Pair reads, you will need the Python [regex library](https://pypi.python.org/pypi/regex) to be pre-installed on your machine. You can install it with the Python [pip-installer](http://www.pip-installer.org/):
+
+``` bash
+
+    pip install regex
+```
+
+or with the [Easy Install](http://peak.telecommunity.com/DevCenter/EasyInstall) Python module:
+
+``` bash
+
+    easy_install regex
+```
+
+Notes:
+
+- It is strongly suggested to provide multiple paired-end and mate-pair libraries according to their insert size (from smallest to longest).
+- It is not recommended to run SPAdes on PacBio reads with low coverage (less than 5x).
+- We suggest not running SPAdes on PacBio reads for large genomes.
+- SPAdes accepts gzip-compressed files.
+
+### Read-pair libraries
+
+Using the command-line interface, you can specify up to nine different paired-end libraries, up to nine mate-pair libraries and also up to nine high-quality mate-pair ones. If you wish to use more, you can use a [YAML data set file](#yaml). We further refer to paired-end and mate-pair libraries simply as read-pair libraries.
+
+By default, SPAdes assumes that paired-end and high-quality mate-pair reads have forward-reverse (fr) orientation and usual mate-pairs have reverse-forward (rf) orientation. However, a different orientation can be set for any library by using SPAdes options.
+
+To distinguish reads in pairs we refer to them as left and right reads. For forward-reverse orientation, the forward reads correspond to the left reads and the reverse reads, to the right. Similarly, in reverse-forward orientation left and right reads correspond to reverse and forward reads, respectively, etc.
+
+Each read-pair library can be stored in several files or several pairs of files. Paired reads can be organized in two different ways:
+
+- In file pairs. In this case left and right reads are placed in different files and appear in the same order in their respective files.
+- In interleaved files. In this case, the reads are interlaced, so that each right read follows the corresponding paired left read.
+
+For example, Illumina produces paired-end reads in two files: `R1.fastq` and `R2.fastq`. If you choose to store reads in file pairs, make sure that for every read from `R1.fastq` the corresponding paired read from `R2.fastq` is placed in the respective paired file on the same line number. If you choose to use interleaved files, every read from `R1.fastq` should be followed by the corresponding paired read from `R2.fastq`.
+
+If adapter and/or quality trimming software has been used prior to assembly, files with the orphan reads can be provided as "single read files" for the corresponding read-pair library.
+
+If you have merged some of the reads from your paired-end (not mate-pair or high-quality mate-pair) library (using tools such as [BBMerge](https://jgi.doe.gov/data-and-tools/bbtools/bb-tools-user-guide/bbmerge-guide/) or [STORM](https://bitbucket.org/yaoornl/align_test/overview)), you should provide the file with the resulting reads as a "merged read file" for the corresponding library. Note that non-empty files with the remaining unmerged left/right reads (separate or interlaced) must be provided for the same library (for SPAdes to correctly detect the original read length).
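+
+For example, a single paired-end library with merged and remaining unmerged reads might be specified as in the following sketch (file names are placeholders):
+
+``` bash
+
+    # merged reads plus the remaining unmerged pairs from the same library
+    spades.py --merged merged.fastq -1 unmerged_1.fastq -2 unmerged_2.fastq -o spades_output
+```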
+
+In the unlikely case that some of the reads from your mate-pair (or high-quality mate-pair) library are "merged", you should provide the resulting reads as a SEPARATE single-read library.
+
+### Unpaired (single-read) libraries
+
+Using the command-line interface, you can specify up to nine different single-read libraries. To input more libraries, you can use a [YAML data set file](#yaml).
+
+Single-read libraries are assumed to have high quality and reasonable coverage. For example, you can provide PacBio CCS reads as a single-read library.
+
+Note that you should not specify PacBio CLR, Sanger reads or additional contigs as single-read libraries; each of them has a separate [option](#inputdata).
+
+
+### PacBio and Oxford Nanopore reads
+
+SPAdes can take as input an unlimited number of PacBio and Oxford Nanopore libraries.
+
+PacBio CLR and Oxford Nanopore reads are used for hybrid assemblies (e.g. with Illumina or IonTorrent). There is no need to pre-correct this kind of data. SPAdes will use PacBio CLR and Oxford Nanopore reads for gap closure and repeat resolution.
+
+For PacBio you just need to have filtered subreads in FASTQ/FASTA format. Provide these filtered subreads using the `--pacbio` option. Oxford Nanopore reads are provided with the `--nanopore` option.
+
+PacBio CCS/Reads of Insert reads or pre-corrected (using third-party software) PacBio CLR / Oxford Nanopore reads can simply be provided as single reads to SPAdes.
+
+### Additional contigs
+
+In case you have contigs of the same genome generated by other assembler(s) and you wish to merge them into the SPAdes assembly, you can specify additional contigs using `--trusted-contigs` or `--untrusted-contigs`. The first option is used when high-quality contigs are available. These contigs will be used for graph construction, gap closure and repeat resolution. The second option is used for less reliable contigs that may have more errors or contigs of unknown quality. These contigs will be used only for gap closure and repeat resolution. The number of additional contigs is unlimited.
+
+Note that SPAdes does not perform assembly using genomes of closely related species. Only contigs of the same genome should be specified.
+
+
+## SPAdes command line options
+
+To run SPAdes from the command line, type
+
+``` bash
+
+    spades.py [options] -o <output_dir>
+```
+
+Note that we assume that the SPAdes installation directory is added to the `PATH` variable (otherwise provide the full path to the SPAdes executable: `<installation_dir>/spades.py`).
+
+
+### Basic options
+
+`-o <output_dir>`
+    Specify the output directory. Required option.
+
+`--isolate`
+    This flag is highly recommended for high-coverage isolate and multi-cell data; improves the assembly quality and running time.
+    Not compatible with `--only-error-correction` or `--careful` options.
+
+`--sc`
+    This flag is required for MDA (single-cell) data.
+
+`--meta`   (same as `metaspades.py`)
+    This flag is recommended when assembling metagenomic data sets (runs metaSPAdes, see [paper](https://genome.cshlp.org/content/27/5/824.short) for more details). Currently metaSPAdes supports only a **_single_** short-read library which has to be **_paired-end_** (we hope to remove this restriction soon). In addition, you can provide long reads (e.g. using the `--pacbio` or `--nanopore` options), but hybrid assembly for metagenomes remains an experimental pipeline and optimal performance is not guaranteed. It does not support [careful mode](#correctoropt) (mismatch correction is not available). In addition, you cannot specify a coverage cutoff for metaSPAdes. Note that metaSPAdes might be very sensitive to the presence of technical sequences remaining in the data (most notably adapter read-throughs), so please run quality control and pre-process your data accordingly.
+
+`--plasmid`   (same as `plasmidspades.py`)
+    This flag is required when assembling only plasmids from WGS data sets (runs plasmidSPAdes, see [paper](http://biorxiv.org/content/early/2016/04/20/048942) for the algorithm details). Note that plasmidSPAdes is not compatible with [metaSPAdes](#meta) and [single-cell mode](#sc). Additionally, we do not recommend running plasmidSPAdes on more than one library. See [section 3.6](#sec3.6) for plasmidSPAdes output details.
+
+`--bio`
+    This flag is required when assembling only non-ribosomal and polyketide gene clusters from WGS data sets (runs biosyntheticSPAdes, see [paper](https://genome.cshlp.org/content/early/2019/06/03/gr.243477.118?top=1) for the algorithm details). biosyntheticSPAdes is supposed to work on isolate or metagenomic WGS data sets. Note that biosyntheticSPAdes is not compatible with any other modes. See [section 3.7](#sec3.7) for biosyntheticSPAdes output details.
+
+`--rna`   (same as `rnaspades.py`)
+    This flag should be used when assembling RNA-Seq data sets (runs rnaSPAdes). To learn more, see the [rnaSPAdes manual](assembler/rnaspades_manual.html).
+    Not compatible with `--only-error-correction` or `--careful` options.
+
+`--iontorrent`
+    This flag is required when assembling IonTorrent data. Allows BAM files as input. Carefully read [section 3.3](#sec3.3) before using this option.
+
+`--test`
+    Runs SPAdes on the toy data set; see [section 2.4](#sec2.4).
+
+`-h` (or `--help`)
+    Prints help.
+
+`-v` (or `--version`)
+    Prints SPAdes version.
+
+
+### Pipeline options
+
+`--only-error-correction`
+    Performs read error correction only.
+
+`--only-assembler`
+    Runs the assembly module only.
+
+`--careful`
+    Tries to reduce the number of mismatches and short indels. Also runs MismatchCorrector – a post-processing tool which uses the [BWA](http://bio-bwa.sourceforge.net) tool (comes with SPAdes). This option is recommended only for assembly of small genomes. We strongly recommend not to use it for large and medium-size eukaryotic genomes. Note that this option is not supported by metaSPAdes and rnaSPAdes.
+
+`--continue`
+    Continues SPAdes run from the specified output folder starting from the last available check-point. Check-points are made after:
+
+- the error correction module is finished
+- the iteration for each specified K value of the assembly module is finished
+- mismatch correction is finished for contigs or scaffolds
+
+For example, if the specified K values are 21, 33 and 55 and SPAdes was stopped or crashed during the assembly stage with K = 55, you can run SPAdes with the `--continue` option specifying the same output directory. SPAdes will continue the run starting from the assembly stage with K = 55. The error correction module and the iterations for K equal to 21 and 33 will not be run again. If `--continue` is set, the only allowed option is `-o <output_dir>`.
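+
+For instance, resuming such an interrupted run reduces to the following sketch (`spades_output` stands for the original output directory):
+
+``` bash
+
+    # resume from the last available check-point
+    spades.py --continue -o spades_output
+```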
+
+`--restart-from <check_point>`
+    Restart SPAdes run from the specified output folder starting from the specified check-point. Check-points are:
+
+- `ec` – start from error correction
+- `as` – restart the assembly module from the first iteration
+- `k<int>` – restart from the iteration with the specified k value, e.g. `k55` (not available in RNA-Seq mode)
+- `mc` – restart mismatch correction
+- `last` – restart from the last available check-point (similar to `--continue`)
+
+In contrast to the `--continue` option, you can change some of the options when using `--restart-from`. You can change any option except: all basic options, all options for specifying input data (including `--dataset`), the `--only-error-correction` option and the `--only-assembler` option. For example, if you ran the assembler with k values 21,33,55 without mismatch correction, you can add one more iteration with k=77 and run the mismatch correction step by running SPAdes with the following options:
+`--restart-from k55 -k 21,33,55,77 --mismatch-correction -o <output_dir>`.
+Since all files will be overwritten, do not forget to copy your assembly from the previous run if you need it.
+
+`--disable-gzip-output`
+    Forces the read error correction module not to compress the corrected reads. If this option is not set, corrected reads will be in `*.fastq.gz` format.
+
+
+### Input data
+
+#### Specifying single library (paired-end or single-read)
+
+`--12 <file_name>`
+    File with interlaced forward and reverse paired-end reads.
+
+`-1 <file_name>`
+    File with forward reads.
+
+`-2 <file_name>`
+    File with reverse reads.
+
+`--merged <file_name>`
+    File with merged paired reads.
+    If the properties of the library permit, overlapping paired-end reads can be merged using special software.
+    Non-empty files with (remaining) unmerged left/right reads (separate or interlaced) must be provided for the same library for SPAdes to correctly detect the original read length.
+
+`-s <file_name>`
+    File with unpaired reads.
+
+#### Specifying multiple libraries
+
+**_Single-read libraries_**
+
+`--s<#> <file_name>`
+    File for single-read library number `<#>` (`<#>` = 1,2,..,9). For example, for the first single-read library the option is: `--s1 <file_name>`.
+    Do not use the `-s` option for single-read libraries, since it specifies unpaired reads for the first paired-end library.
+
+**_Paired-end libraries_**
+
+`--pe<#>-12 <file_name>`
+    File with interlaced reads for paired-end library number `<#>` (`<#>` = 1,2,..,9). For example, for the first paired-end library the option is: `--pe1-12 <file_name>`.
+
+`--pe<#>-1 <file_name>`
+    File with left reads for paired-end library number `<#>` (`<#>` = 1,2,..,9).
+
+`--pe<#>-2 <file_name>`
+    File with right reads for paired-end library number `<#>` (`<#>` = 1,2,..,9).
+
+`--pe<#>-m <file_name>`
+    File with merged reads from paired-end library number `<#>` (`<#>` = 1,2,..,9).
+    If the properties of the library permit, paired reads can be merged using special software. Non-empty files with (remaining) unmerged left/right reads (separate or interlaced) must be provided for the same library for SPAdes to correctly detect the original read length.
+
+`--pe<#>-s <file_name>`
+    File with unpaired reads from paired-end library number `<#>` (`<#>` = 1,2,..,9).
+    For example, paired reads can become unpaired during the error correction procedure.
+
+`--pe<#>-<or>`
+    Orientation of reads for paired-end library number `<#>` (`<#>` = 1,2,..,9; `<or>` = "fr","rf","ff").
+    The default orientation for paired-end libraries is forward-reverse (`--> <--`). For example, to specify reverse-forward orientation for the second paired-end library, you should use the flag: `--pe2-rf`. This should not be confused with FR and RF strand-specificity for RNA-Seq data (see the rnaSPAdes manual).
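+
+Note that the numbered options generalize the single-library ones; e.g. the following two invocations are expected to describe the same single paired-end library (a sketch; file names are placeholders):
+
+``` bash
+
+    spades.py -1 lib1_R1.fastq -2 lib1_R2.fastq -o spades_output
+    spades.py --pe1-1 lib1_R1.fastq --pe1-2 lib1_R2.fastq -o spades_output
+```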
+
+**_Mate-pair libraries_**
+
+`--mp<#>-12 <file_name>`
+    File with interlaced reads for mate-pair library number `<#>` (`<#>` = 1,2,..,9).
+
+`--mp<#>-1 <file_name>`
+    File with left reads for mate-pair library number `<#>` (`<#>` = 1,2,..,9).
+
+`--mp<#>-2 <file_name>`
+    File with right reads for mate-pair library number `<#>` (`<#>` = 1,2,..,9).
+
+`--mp<#>-<or>`
+    Orientation of reads for mate-pair library number `<#>` (`<#>` = 1,2,..,9; `<or>` = "fr","rf","ff").
+    The default orientation for mate-pair libraries is reverse-forward (`<-- -->`). For example, to specify forward-forward orientation for the first mate-pair library, you should use the flag: `--mp1-ff`.
+
+**_High-quality mate-pair libraries_** (can be used for mate-pair only assembly)
+
+`--hqmp<#>-12 <file_name>`
+    File with interlaced reads for high-quality mate-pair library number `<#>` (`<#>` = 1,2,..,9).
+
+`--hqmp<#>-1 <file_name>`
+    File with left reads for high-quality mate-pair library number `<#>` (`<#>` = 1,2,..,9).
+
+`--hqmp<#>-2 <file_name>`
+    File with right reads for high-quality mate-pair library number `<#>` (`<#>` = 1,2,..,9).
+
+`--hqmp<#>-s <file_name>`
+    File with unpaired reads from high-quality mate-pair library number `<#>` (`<#>` = 1,2,..,9).
+
+`--hqmp<#>-<or>`
+    Orientation of reads for high-quality mate-pair library number `<#>` (`<#>` = 1,2,..,9; `<or>` = "fr","rf","ff").
+    The default orientation for high-quality mate-pair libraries is forward-reverse (`--> <--`). For example, to specify reverse-forward orientation for the first high-quality mate-pair library, you should use the flag: `--hqmp1-rf`.
+
+**_Lucigen NxSeq® Long Mate Pair libraries_** (see [section 3.1](#sec3.1) for details)
+
+`--nxmate<#>-1 <file_name>`
+    File with left reads for Lucigen NxSeq® Long Mate Pair library number `<#>` (`<#>` = 1,2,..,9).
+
+`--nxmate<#>-2 <file_name>`
+    File with right reads for Lucigen NxSeq® Long Mate Pair library number `<#>` (`<#>` = 1,2,..,9).
+
+**_Specifying data for hybrid assembly_**
+
+`--pacbio <file_name>`
+    File with PacBio CLR reads. For PacBio CCS reads use the `-s` option. More information on PacBio reads is provided in [section 3.1](#pacbio).
+
+`--nanopore <file_name>`
+    File with Oxford Nanopore reads.
+
+`--sanger <file_name>`
+    File with Sanger reads.
+
+`--trusted-contigs <file_name>`
+    Reliable contigs of the same genome, which are likely to have no misassemblies and a small rate of other errors (e.g. mismatches and indels). This option is not intended for contigs of related species.
+
+`--untrusted-contigs <file_name>`
+    Contigs of the same genome, the quality of which is average or unknown. Contigs of poor quality can be used but may introduce errors in the assembly. This option is also not intended for contigs of related species.
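+
+For example, a hybrid assembly combining an Illumina paired-end library with Oxford Nanopore reads might be launched as in the following sketch (file names are placeholders):
+
+``` bash
+
+    spades.py -1 illumina_R1.fastq -2 illumina_R2.fastq \
+        --nanopore nanopore_reads.fastq -o spades_output
+```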
+
+**_Specifying input data with YAML data set file (advanced)_**
+
+An alternative way to specify an input data set for SPAdes is to create a [YAML](http://www.yaml.org/) data set file. By using a YAML file you can provide an unlimited number of paired-end, mate-pair and unpaired libraries. Basically, a YAML data set file is a text file, in which input libraries are provided as a comma-separated list in square brackets. Each library is provided in braces as a comma-separated list of attributes. The following attributes are available:
+
+- orientation ("fr", "rf", "ff")
+- type ("paired-end", "mate-pairs", "hq-mate-pairs", "single", "pacbio", "nanopore", "sanger", "trusted-contigs", "untrusted-contigs")
+- interlaced reads (comma-separated list of files with interlaced reads)
+- left reads (comma-separated list of files with left reads)
+- right reads (comma-separated list of files with right reads)
+- single reads (comma-separated list of files with single reads or unpaired reads from a paired library)
+- merged reads (comma-separated list of files with [merged reads](#merged))
+
+To properly specify a library you should provide its type and at least one file with reads. Orientation is an optional attribute. Its default value is "fr" (forward-reverse) for paired-end libraries and "rf" (reverse-forward) for mate-pair libraries.
+
+The value for each attribute is given after a colon. Comma-separated lists of files should be given in square brackets. For each file you should provide its full path in double quotes. Make sure that files with right reads are given in the same order as the corresponding files with left reads.
+
+For example, if you have one paired-end library split into two pairs of files:
+
+``` bash
+
+    lib_pe1_left_1.fastq
+    lib_pe1_right_1.fastq
+    lib_pe1_left_2.fastq
+    lib_pe1_right_2.fastq
+```
+
+one mate-pair library:
+
+``` bash
+
+    lib_mp1_left.fastq
+    lib_mp1_right.fastq
+```
+
+and PacBio CCS and CLR reads:
+
+``` bash
+
+    pacbio_ccs.fastq
+    pacbio_clr.fastq
+```
+
+the YAML file should look like this:
+
+``` yaml
+
+    [
+      {
+        orientation: "fr",
+        type: "paired-end",
+        right reads: [
+          "/FULL_PATH_TO_DATASET/lib_pe1_right_1.fastq",
+          "/FULL_PATH_TO_DATASET/lib_pe1_right_2.fastq"
+        ],
+        left reads: [
+          "/FULL_PATH_TO_DATASET/lib_pe1_left_1.fastq",
+          "/FULL_PATH_TO_DATASET/lib_pe1_left_2.fastq"
+        ]
+      },
+      {
+        orientation: "rf",
+        type: "mate-pairs",
+        right reads: [
+          "/FULL_PATH_TO_DATASET/lib_mp1_right.fastq"
+        ],
+        left reads: [
+          "/FULL_PATH_TO_DATASET/lib_mp1_left.fastq"
+        ]
+      },
+      {
+        type: "single",
+        single reads: [
+          "/FULL_PATH_TO_DATASET/pacbio_ccs.fastq"
+        ]
+      },
+      {
+        type: "pacbio",
+        single reads: [
+          "/FULL_PATH_TO_DATASET/pacbio_clr.fastq"
+        ]
+      }
+    ]
+```
+
+Once you have created a YAML file, save it with the `.yaml` extension (e.g. as `my_data_set.yaml`) and run SPAdes using the `--dataset` option (an example invocation is given after the notes below):
+`--dataset <your YAML file>`
+
+Notes:
+
+- The `--dataset` option cannot be used with any other options for specifying input data.
+- We recommend nesting all files with long reads of the same data type in a single library block.
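+
+With the data set above saved as `my_data_set.yaml`, the whole run reduces to the following sketch:
+
+``` bash
+
+    spades.py --dataset my_data_set.yaml -o spades_output
+```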
+
+### Advanced options
+
+`-t <int>` (or `--threads <int>`)
+    Number of threads. The default value is 16.
+
+`-m <int>` (or `--memory <int>`)
+    Set memory limit in Gb. SPAdes terminates if it reaches this limit. The default value is 250 Gb. The actual amount of consumed RAM will be below this limit. Make sure this value is correct for the given machine. SPAdes uses the limit value to automatically determine the sizes of various buffers, etc.
+
+`--tmp-dir <dir_name>`
+    Set the directory for temporary files from read error correction. The default value is `<output_dir>/corrected/tmp`.
+
+`-k <int,int,...>`
+    Comma-separated list of k-mer sizes to be used (all values must be odd, less than 128 and listed in ascending order). If `--sc` is set, the default values are 21,33,55. For multicell data sets K values are automatically selected using the maximum read length ([see note for assembling long Illumina paired reads for details](#sec3.4)). To properly select K values for IonTorrent data read [section 3.3](#sec3.3).
+
+`--cov-cutoff <float>`
+    Read coverage cutoff value. Must be a positive float value, or "auto", or "off". The default value is "off". When set to "auto" SPAdes automatically computes the coverage threshold using a conservative strategy. Note that this option is not supported by metaSPAdes.
+
+`--phred-offset <33 or 64>`
+    PHRED quality offset for the input reads, can be either 33 or 64. It will be auto-detected if it is not specified.
+
+
+### Examples
+
+To test the toy data set, you can also run the following command from the SPAdes `bin` directory:
+
+``` bash
+
+    spades.py --pe1-1 ../share/spades/test_dataset/ecoli_1K_1.fq.gz \
+        --pe1-2 ../share/spades/test_dataset/ecoli_1K_2.fq.gz -o spades_test
+```
+
+If you have your library separated into several pairs of files, for example:
+
+``` bash
+
+    lib1_forward_1.fastq
+    lib1_reverse_1.fastq
+    lib1_forward_2.fastq
+    lib1_reverse_2.fastq
+```
+
+make sure that the corresponding files are given in the same order:
+
+``` bash
+
+    spades.py --pe1-1 lib1_forward_1.fastq --pe1-2 lib1_reverse_1.fastq \
+        --pe1-1 lib1_forward_2.fastq --pe1-2 lib1_reverse_2.fastq \
+        -o spades_output
+```
+
+Files with interleaved paired-end reads or files with unpaired reads can be specified in any order with one file per option, for example:
+
+``` bash
+
+    spades.py --pe1-12 lib1_1.fastq --pe1-12 lib1_2.fastq \
+        --pe1-s lib1_unpaired_1.fastq --pe1-s lib1_unpaired_2.fastq \
+        -o spades_output
+```
+
+If you have several paired-end and mate-pair reads, for example:
+
+paired-end library 1
+
+``` bash
+
+    lib_pe1_left.fastq
+    lib_pe1_right.fastq
+```
+
+mate-pair library 1
+
+``` bash
+
+    lib_mp1_left.fastq
+    lib_mp1_right.fastq
+```
+
+mate-pair library 2
+
+``` bash
+
+    lib_mp2_left.fastq
+    lib_mp2_right.fastq
+```
+
+make sure that files corresponding to each library are grouped together:
+
+``` bash
+
+    spades.py --pe1-1 lib_pe1_left.fastq --pe1-2 lib_pe1_right.fastq \
+        --mp1-1 lib_mp1_left.fastq --mp1-2 lib_mp1_right.fastq \
+        --mp2-1 lib_mp2_left.fastq --mp2-2 lib_mp2_right.fastq \
+        -o spades_output
+```
+
+If you have IonTorrent unpaired reads, PacBio CLR and additional reliable contigs:
+
+``` bash
+
+    it_reads.fastq
+    pacbio_clr.fastq
+    contigs.fasta
+```
+
+run SPAdes with the following command:
+
+``` bash
+
+    spades.py --iontorrent -s it_reads.fastq \
+        --pacbio pacbio_clr.fastq --trusted-contigs contigs.fasta \
+        -o spades_output
+```
+
+If a single-read library is split into several files:
+
+``` bash
+
+    unpaired1_1.fastq
+    unpaired1_2.fastq
+    unpaired1_3.fasta
+```
+
+specify them as one library:
+
+``` bash
+
+    spades.py --s1 unpaired1_1.fastq \
+        --s1 unpaired1_2.fastq --s1 unpaired1_3.fasta \
+        -o spades_output
+```
+
+All options for specifying input data can be mixed if needed, but make sure that files for each library are grouped and that files with left and right paired reads are listed in the same order.
+
+
+## Assembling IonTorrent reads
+
+Only FASTQ or BAM files are supported as input.
+
+The selection of k-mer length is non-trivial for IonTorrent. If the dataset is more or less conventional (good coverage, not high GC, etc), then use our [recommendation for long reads](#sec3.4) (e.g. assemble using k-mer lengths 21,33,55,77,99,127). However, due to the increased error rate some changes of k-mer lengths (e.g. selection of shorter ones) may be required.
+For example, if you ran SPAdes with k-mer lengths 21,33,55,77 and then decided to assemble the same data set using more iterations and larger values of K, you can run SPAdes once again specifying the same output folder and the following options: `--restart-from k77 -k 21,33,55,77,99,127 --mismatch-correction -o <output_dir>`. Do not forget to copy contigs and scaffolds from the previous run. We are planning to tackle the issue of selecting k-mer lengths for IonTorrent reads in future versions.
+
+You may not need error correction for Hi-Q enzyme data at all. However, we suggest assembling your data with and without error correction and selecting the best variant.
+
+For non-trivial datasets (e.g. with high GC, low or uneven coverage) we suggest enabling single-cell mode (setting the `--sc` option) and using k-mer lengths of 21,33,55.
+
+
+## Assembling long Illumina paired reads (2x150 and 2x250)
+
+Recent advances in DNA sequencing technology have led to a rapid increase in read length. Nowadays, it is a common situation to have a data set consisting of 2x150 or 2x250 paired-end reads produced by Illumina MiSeq or HiSeq2500. However, the use of longer reads alone will not automatically improve assembly quality. An assembler that can properly take advantage of them is needed.
+
+SPAdes' use of iterative k-mer lengths allows benefiting from the full potential of long paired-end reads. Currently one has to set the assembler options manually, but we plan to incorporate automatic calculation of the necessary options soon.
+
+Please note that in addition to the read length, the insert length also matters a lot. It is not recommended to sequence a 300bp fragment with a pair of 250bp reads. We suggest using 350-500 bp fragments with 2x150 reads and 550-700 bp fragments with 2x250 reads.
+
+### Multi-cell data set with read length 2x150
+
+Do not turn off SPAdes error correction (BayesHammer module), which is included in the SPAdes default pipeline.
+
+If you have enough coverage (50x+), then you may want to try to set k-mer lengths of 21, 33, 55, 77 (selected by default for reads with length 150bp).
+
+Make sure you run the assembler with the `--careful` option to minimize the number of mismatches in the final contigs.
+
+We recommend that you check the SPAdes log file at the end of each iteration to control the average coverage of the contigs.
+
+For reads corrected prior to running the assembler:
+
+``` bash
+
+    spades.py -k 21,33,55,77 --careful --only-assembler <your reads> -o spades_output
+```
+
+To correct and assemble the reads:
+
+``` bash
+
+    spades.py -k 21,33,55,77 --careful <your reads> -o spades_output
+```
+
+### Multi-cell data set with read lengths 2x250
+
+Do not turn off SPAdes error correction (BayesHammer module), which is included in the SPAdes default pipeline.
+
+By default we suggest increasing k-mer lengths in increments of 22 until the k-mer length reaches 127. The exact length of the k-mer depends on the coverage: a k-mer length of 127 corresponds to 50x k-mer coverage and higher. For read length 250bp SPAdes automatically chooses K values equal to 21, 33, 55, 77, 99, 127.
+
+Make sure you run the assembler with the `--careful` option to minimize the number of mismatches in the final contigs.
+
+We recommend that you check the SPAdes log file at the end of each iteration to control the average coverage of the contigs.
+
+For reads corrected prior to running the assembler:
+
+``` bash
+
+    spades.py -k 21,33,55,77,99,127 --careful --only-assembler <your reads> -o spades_output
+```
+
+To correct and assemble the reads:
+
+``` bash
+
+    spades.py -k 21,33,55,77,99,127 --careful <your reads> -o spades_output
+```
+
+### Single-cell data set with read lengths 2x150 or 2x250
+
+The default k-mer lengths are recommended. For single-cell data sets SPAdes selects k-mer sizes 21, 33 and 55.
+
+However, it might be tricky to fully utilize the advantages of the long reads you have. Consider contacting us for more information and to discuss assembly strategy.
+
+
+## SPAdes output
+
+SPAdes stores all output files in `<output_dir>`, which is set by the user.
+
+- `<output_dir>/corrected/` directory contains reads corrected by BayesHammer in `*.fastq.gz` files; if compression is disabled, reads are stored in uncompressed `*.fastq` files
+- `<output_dir>/scaffolds.fasta` contains resulting scaffolds (recommended for use as resulting sequences)
+- `<output_dir>/contigs.fasta` contains resulting contigs
+- `<output_dir>/assembly_graph.gfa` contains SPAdes assembly graph and scaffolds paths in [GFA 1.0 format](https://github.com/GFA-spec/GFA-spec/blob/master/GFA1.md)
+- `<output_dir>/assembly_graph.fastg` contains SPAdes assembly graph in [FASTG format](http://fastg.sourceforge.net/FASTG_Spec_v1.00.pdf)
+- `<output_dir>/contigs.paths` contains paths in the assembly graph corresponding to contigs.fasta (see details below)
+- `<output_dir>/scaffolds.paths` contains paths in the assembly graph corresponding to scaffolds.fasta (see details below)
+
+Contig/scaffold names in SPAdes output FASTA files have the following format:
+`>NODE_3_length_237403_cov_243.207`
+Here `3` is the number of the contig/scaffold, `237403` is the sequence length in nucleotides and `243.207` is the k-mer coverage for the last (largest) k value used. Note that the k-mer coverage is always lower than the read (per-base) coverage.
+
+In general, SPAdes uses two techniques for joining contigs into scaffolds. The first one relies on read pairs and tries to estimate the size of the gap separating contigs. The second one relies on the assembly graph: e.g. if two contigs are separated by a complex tandem repeat that cannot be resolved exactly, the contigs are joined into a scaffold with a fixed gap size of 100 bp. Contigs produced by SPAdes do not contain N symbols.
+
+To view FASTG and GFA files we recommend using the [Bandage visualization tool](http://rrwick.github.io/Bandage/). Note that sequences stored in `assembly_graph.fastg` correspond to contigs before repeat resolution (edges of the assembly graph). Paths corresponding to contigs after repeat resolution (scaffolding) are stored in `contigs.paths` (`scaffolds.paths`) in the format accepted by Bandage (see [Bandage wiki](https://github.com/rrwick/Bandage/wiki/Graph-paths) for details). An example is given below.
+
+Let the contig with the name `NODE_5_length_100000_cov_215.651` consist of the following edges of the assembly graph (`'` denotes a reverse-complement edge):
+
+``` plain
+    >EDGE_2_length_33280_cov_199.702
+    >EDGE_5_length_84_cov_321.414'
+    >EDGE_3_length_111_cov_175.304
+    >EDGE_5_length_84_cov_321.414'
+    >EDGE_4_length_66661_cov_223.548
+```
+
+Then, `contigs.paths` will contain the following record:
+
+``` plain
+    NODE_5_length_100000_cov_215.651
+    2+,5-,3+,5-,4+
+```
+
+Since the current version of Bandage does not accept paths with gaps, paths corresponding to contigs/scaffolds that jump over a gap in the assembly graph are split by a semicolon at the gap positions.
+For example, the following record
+
+``` plain
+    NODE_3_length_237403_cov_243.207
+    21-,17-,15+,17-,16+;
+    31+,23-,22+,23-,4-
+```
+
+states that `NODE_3_length_237403_cov_243.207` corresponds to a path with 10 edges, but jumps over a gap between edges `EDGE_16_length_21503_cov_482.709` and `EDGE_31_length_140767_cov_220.239`.
+
+The full list of `<output_dir>` content is presented below:
+
+- scaffolds.fasta – resulting scaffolds (recommended for use as resulting sequences)
+- contigs.fasta – resulting contigs
+- assembly_graph.fastg – assembly graph
+- contigs.paths – contigs paths in the assembly graph
+- scaffolds.paths – scaffolds paths in the assembly graph
+- before_rr.fasta – contigs before repeat resolution
+- corrected/ – files from read error correction
+    - configs/ – configuration files for read error correction
+    - corrected.yaml – internal configuration file
+    - output files with corrected reads
+- params.txt – information about SPAdes parameters in this run
+- spades.log – SPAdes log
+- dataset.info – internal configuration file
+- input_dataset.yaml – internal YAML data set file
+- K<##>/ – directory containing intermediate files from the run with K=<##>. These files should not be used as assembly results; use the resulting contigs/scaffolds in the files mentioned above.
+
+SPAdes will overwrite these files and directories if they exist in the specified `<output_dir>`.
+
+
+## plasmidSPAdes output
+
+plasmidSPAdes outputs only DNA sequences from putative plasmids. Output file names and formats remain the same as in SPAdes (see the [previous](#sec3.5) section), with the following difference. For all contig names in `contigs.fasta`, `scaffolds.fasta` and `assembly_graph.fastg` we append the suffix `_component_X`, where `X` is the id of the putative plasmid which the contig belongs to. Note that plasmidSPAdes may not be able to separate similar plasmids and thus their contigs may appear with the same id.
+
+
+## biosyntheticSPAdes output
+
+biosyntheticSPAdes outputs three files of interest:
+
+- gene_clusters.fasta – contains DNA sequences from putative biosynthetic gene clusters (BGCs). Since each sample may contain multiple BGCs and biosyntheticSPAdes can output several putative DNA sequences for each cluster, for each contig name we append the suffix `_cluster_X_candidate_Y`, where X is the id of the BGC and Y is the id of the candidate from the BGC.
+- bgc_statistics.txt – contains statistics about BGC composition in the sample. First, it reports the number of domain hits in the sample. Then, for each BGC candidate it reports the domain order with positions on the corresponding DNA sequence from gene_clusters.fasta.
+- domain_graph.dot – contains the domain graph structure, which can be used to assess the complexity of the sample and the structure of BGCs. For more information about domain graph construction, please refer to the paper.
+
+
+## Assembly evaluation
+
+[QUAST](http://cab.spbu.ru/software/quast/) may be used to generate summary statistics (N50, maximum contig length, GC %, \# genes found in a reference list or with built-in gene finding tools, etc.) for a single assembly. It may also be used to compare statistics for multiple assemblies of the same data set (e.g., SPAdes run with different parameters, or several different assemblers).
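+
+As an illustration, assuming QUAST is installed and on your `PATH`, a comparison of two assemblies against a reference might look like the following sketch (file names are placeholders):
+
+``` bash
+
+    # compare two assemblies of the same data set against a reference genome
+    quast.py spades_careful/contigs.fasta spades_default/contigs.fasta \
+        -r reference.fasta -o quast_output
+```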
+
+
+# Stand-alone binaries released within SPAdes package
+
+
+## k-mer counting
+
+To provide input data to the SPAdes k-mer counting tool `spades-kmercount`, you may just specify files in [SPAdes-supported formats](#sec3.1) without any flags (after all options) or provide a dataset description file in [YAML format](#yaml).
+
+Output: `<workdir>/final_kmers` – unordered set of k-mers in binary format. K-mers from both forward and reverse-complementary reads are taken into account.
+
+Output format: all k-mers are written sequentially without any separators. Each k-mer takes the same number of bits. One k-mer of length K takes 2*K bits. K-mers are aligned by 64 bits. For example, one k-mer with length=21 takes 8 bytes, with length=33 takes 16 bytes, and with length=55 takes 16 bytes. Each nucleotide is coded with 2 bits: 00 – A, 01 – C, 10 – G, 11 – T.
+
+Example:
+
+``` plain
+    For k-mer: AGCTCT
+    Memory: 6 nucleotides * 2 bits = 12 bits, aligned to 64 bits (8 bytes)
+    Byte layout:
+    data[0] = AGCT -> 11 01 10 00 -> 0xd8
+    data[1] = CT00 -> 00 00 11 01 -> 0x0d
+    data[2] .. data[7] = 0000 -> 00 00 00 00 -> 0x00
+```
+
+Synopsis: `spades-kmercount [OPTION...] <input files>`
+
+The options are:
+
+`-d, --dataset <file>`
+    dataset description (in YAML format), input files are ignored
+
+`-k, --kmer <int>`
+    k-mer length (default: 21)
+
+`-t, --threads <int>`
+    number of threads to use (default: number of CPUs)
+
+`-w, --workdir <dir>`
+    working directory to use (default: current directory)
+
+`-b, --bufsize <int>`
+    sorting buffer size in bytes, per thread (default 536870912)
+
+`-h, --help`
+    print help message
+
+
+## k-mer coverage read filter
+
+`spades-read-filter` is a tool for filtering reads whose median k-mer coverage is below a threshold.
+
+To provide input data to the SPAdes k-mer read filter tool `spades-read-filter`, you should provide a dataset description file in [YAML format](#yaml).
+
+Synopsis: `spades-read-filter [OPTION...] -d <yaml>`
+
+The options are:
+
+`-d, --dataset <file>`
+    dataset description (in YAML format)
+
+`-k, --kmer <int>`
+    k-mer length (default: 21)
+
+`-t, --threads <int>`
+    number of threads to use (default: number of CPUs)
+
+`-o, --outdir <dir>`
+    output directory to use (default: current directory)
+
+`-c, --cov <value>`
+    median k-mer count threshold (read pairs such that the k-mer count median for BOTH reads is LESS OR EQUAL to this value will be ignored)
+
+`-h, --help`
+    print help message
+
+
+## k-mer cardinality estimating
+
+`spades-kmer-estimating` is a tool for estimating the approximate number of unique k-mers in the provided reads. K-mers from reverse-complementary reads aren't taken into account for k-mer cardinality estimating.
+
+To provide input data to the SPAdes k-mer cardinality estimating tool `spades-kmer-estimating`, you should provide a dataset description file in [YAML format](#yaml).
+
+Synopsis: `spades-kmer-estimating [OPTION...] -d <yaml>`
+
+The options are:
+
+`-d, --dataset <file>`
+    dataset description (in YAML format)
+
+`-k, --kmer <int>`
+    k-mer length (default: 21)
+
+`-t, --threads <int>`
+    number of threads to use (default: number of CPUs)
+
+`-h, --help`
+    print help message
+
+
+## Graph construction
+
+The graph construction tool `spades-gbuilder` has two mandatory options: a dataset description file in [YAML format](#yaml) and an output file name.
+
+Synopsis: `spades-gbuilder <dataset description (in YAML)> <output filename> [-k <int>] [-t <int>] [-tmp-dir <dir>] [-b <int>] [-unitigs|-fastg|-gfa|-spades]`
+
+Additional options are:
+
+`-k <int>`
+    k-mer length used for construction (must be odd)
+
+`-t <int>`
+    number of threads
+
+`-tmp-dir <dir>`
+    scratch directory to use
+
+`-b <int>`
+    sorting buffer size (per thread, in bytes)
+
+`-unitigs`
+    output graph as unitigs (in FASTA format)
+
+`-fastg`
+    output graph in FASTG format
+
+`-gfa`
+    output graph in GFA1 format
+
+`-spades`
+    output graph in SPAdes internal format
+
+
+## Long read to graph alignment
+
+
+### hybridSPAdes aligner
+
+The `spades-gmapper` tool makes it possible to extract the long read alignments generated with hybridSPAdes pipeline options. It has three mandatory options: a dataset description file in [YAML format](#yaml), a graph file in GFA format and an output file name.
+
+Synopsis: `spades-gmapper <dataset description (in YAML)> <graph (in GFA)> <output filename> [-k <int>] [-t <int>] [-tmpdir <dir>]`
+
+Additional options are:
+
+`-k <int>`
+    k-mer length that was used for graph construction
+
+`-t <int>`
+    number of threads
+
+`-tmpdir <dir>`
+    scratch directory to use
+
+While `spades-gmapper` is a solution for those who work on hybridSPAdes assembly and want to get intermediate results, [SPAligner](#sec4.5.2) is an end-product application for sequence-to-graph alignment with tunable parameters and output types.
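+
+A minimal invocation sketch, assuming the dataset description and graph file names below (both are placeholders):
+
+``` bash
+
+    # align the long reads described in the YAML file to the assembly graph
+    spades-gmapper my_data_set.yaml assembly_graph.gfa alignments.out -t 8
+```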
+
+
+### SPAligner
+
+A tool for fast and accurate alignment of nucleotide sequences to assembly graphs. It takes a file with sequences (in FASTA/FASTQ format) and an assembly in GFA format, and outputs long read to graph alignments in various formats (such as TSV, FASTA and [GPA](https://github.com/ocxtal/gpa "GPA-format spec")).
+
+Synopsis: `spaligner assembly/src/projects/spaligner_config.yaml -d <datatype> -s <sequence file> -g <graph file> -k <int> [-t <int>] [-o <output_dir>]`
+
+Parameters are:
+
+`-d <datatype>`
+    long read type: nanopore, pacbio
+
+`-s <file_name>`
+    file with sequences (in FASTA/FASTQ)
+
+`-g <file_name>`
+    file with the graph (in GFA)
+
+`-k <int>`
+    k-mer length that was used for graph construction
+
+`-t <int>`
+    number of threads (default: 8)
+
+`-o, --outdir <dir>`
+    output directory to use (default: spaligner_result/)
+
+For more information on parameters and options please refer to the main SPAligner manual (assembler/src/projects/spaligner/README.md).
+
+If you want to align protein sequences, please refer to our [pre-release version](https://github.com/ablab/spades/releases/tag/spaligner-paper).
+
+
+# Citation
+
+If you use SPAdes in your research, please include [Nurk, Bankevich et al., 2013](http://link.springer.com/chapter/10.1007%2F978-3-642-37195-0_13) in your reference list. You may also add [Bankevich, Nurk et al., 2012](http://online.liebertpub.com/doi/abs/10.1089/cmb.2012.0021) instead.
+
+In case you perform hybrid assembly using PacBio or Nanopore reads, you may also cite [Antipov et al., 2015](http://bioinformatics.oxfordjournals.org/content/early/2015/11/20/bioinformatics.btv688.short).
+
+If you use multiple paired-end and/or mate-pair libraries, you may also cite the papers describing the SPAdes repeat resolution algorithms: [Prjibelski et al., 2014](http://bioinformatics.oxfordjournals.org/content/30/12/i293.short) and [Vasilinetc et al., 2015](http://bioinformatics.oxfordjournals.org/content/31/20/3262.abstract).
+
+If you use metaSPAdes, please cite [Nurk et al., 2017](https://genome.cshlp.org/content/27/5/824.short).
+
+If you use plasmidSPAdes, please cite [Antipov et al., 2016](https://academic.oup.com/bioinformatics/article/32/22/3380/2525610).
+
+For rnaSPAdes citation use [Bushmanova et al., 2019](https://academic.oup.com/gigascience/article/8/9/giz100/5559527).
+
+If you use biosyntheticSPAdes, please cite [Meleshko et al., 2019](https://genome.cshlp.org/content/early/2019/06/03/gr.243477.118?top=1).
+
+In addition, we would like to list your publications that use our software on our website. Please email the reference, the name of your lab, department and institution to .
+
+
+# Feedback and bug reports
+
+Your comments, bug reports, and suggestions are very welcome. They will help us to further improve SPAdes. If you have any trouble running SPAdes, please send us `params.txt` and `spades.log` from the directory `<output_dir>`.
+
+You can leave your comments and bug reports at [our GitHub repository tracker](https://github.com/ablab/spades/issues) or send them via e-mail: .
+ diff --git a/src/SPAdes-3.14.0-Linux/share/spades/VERSION b/src/SPAdes-3.14.0-Linux/share/spades/VERSION new file mode 100644 index 0000000..f982feb --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/VERSION @@ -0,0 +1 @@ +3.14.0 diff --git a/src/SPAdes-3.14.0-Linux/share/spades/biosynthetic_spades_hmms/AMP.hmm.gz b/src/SPAdes-3.14.0-Linux/share/spades/biosynthetic_spades_hmms/AMP.hmm.gz new file mode 100644 index 0000000..30233a3 Binary files /dev/null and b/src/SPAdes-3.14.0-Linux/share/spades/biosynthetic_spades_hmms/AMP.hmm.gz differ diff --git a/src/SPAdes-3.14.0-Linux/share/spades/biosynthetic_spades_hmms/AT.hmm.gz b/src/SPAdes-3.14.0-Linux/share/spades/biosynthetic_spades_hmms/AT.hmm.gz new file mode 100644 index 0000000..85de298 Binary files /dev/null and b/src/SPAdes-3.14.0-Linux/share/spades/biosynthetic_spades_hmms/AT.hmm.gz differ diff --git a/src/SPAdes-3.14.0-Linux/share/spades/biosynthetic_spades_hmms/CStart.hmm.gz b/src/SPAdes-3.14.0-Linux/share/spades/biosynthetic_spades_hmms/CStart.hmm.gz new file mode 100644 index 0000000..104a68b Binary files /dev/null and b/src/SPAdes-3.14.0-Linux/share/spades/biosynthetic_spades_hmms/CStart.hmm.gz differ diff --git a/src/SPAdes-3.14.0-Linux/share/spades/biosynthetic_spades_hmms/KR.hmm.gz b/src/SPAdes-3.14.0-Linux/share/spades/biosynthetic_spades_hmms/KR.hmm.gz new file mode 100644 index 0000000..eb76ade Binary files /dev/null and b/src/SPAdes-3.14.0-Linux/share/spades/biosynthetic_spades_hmms/KR.hmm.gz differ diff --git a/src/SPAdes-3.14.0-Linux/share/spades/biosynthetic_spades_hmms/KS.hmm.gz b/src/SPAdes-3.14.0-Linux/share/spades/biosynthetic_spades_hmms/KS.hmm.gz new file mode 100644 index 0000000..59bc846 Binary files /dev/null and b/src/SPAdes-3.14.0-Linux/share/spades/biosynthetic_spades_hmms/KS.hmm.gz differ diff --git a/src/SPAdes-3.14.0-Linux/share/spades/biosynthetic_spades_hmms/TE.hmm.gz b/src/SPAdes-3.14.0-Linux/share/spades/biosynthetic_spades_hmms/TE.hmm.gz new file mode 100644 index 0000000..8eaa364 Binary files /dev/null and b/src/SPAdes-3.14.0-Linux/share/spades/biosynthetic_spades_hmms/TE.hmm.gz differ diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/corrector/corrector.info b/src/SPAdes-3.14.0-Linux/share/spades/configs/corrector/corrector.info similarity index 76% rename from src/SPAdes-3.10.1-Linux/share/spades/configs/corrector/corrector.info rename to src/SPAdes-3.14.0-Linux/share/spades/configs/corrector/corrector.info index 22740a3..d0373f1 100644 --- a/src/SPAdes-3.10.1-Linux/share/spades/configs/corrector/corrector.info +++ b/src/SPAdes-3.14.0-Linux/share/spades/configs/corrector/corrector.info @@ -3,5 +3,6 @@ dataset: ./configs/debruijn/datasets/ECOLI_IS220_QUAKE.yaml, work_dir: ./test_dataset/input/corrected/tmp, output_dir: ./test_dataset/input/corrected, max_nthreads: 16, -strategy: mapped_squared +strategy: mapped_squared, +log_filename: log.properties } diff --git a/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/bgc_mode.info b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/bgc_mode.info new file mode 100644 index 0000000..7826875 --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/bgc_mode.info @@ -0,0 +1,2 @@ +mode bgc +set_of_hmms none \ No newline at end of file diff --git a/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/careful_mda_mode.info b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/careful_mda_mode.info new file mode 100644 index 0000000..6935014 --- /dev/null +++ 
b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/careful_mda_mode.info @@ -0,0 +1,40 @@ +simp +{ + ; bulge remover: + br + { + enabled true + max_relative_coverage 1.1 ; bulge_cov < this * not_bulge_cov + } + + ; complex bulge remover + cbr + { + enabled false + } + + final_tc + { + condition "" + } + + ; bulge remover: + final_br + { + enabled false + } + + init_clean + { + early_it_only true + + activation_cov -1. + ier + { + enabled false + } + + tip_condition "" + ec_condition "" + } +} diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/careful_mode.info b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/careful_mode.info similarity index 71% rename from src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/careful_mode.info rename to src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/careful_mode.info index 5cbb786..5379088 100644 --- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/careful_mode.info +++ b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/careful_mode.info @@ -1,12 +1,11 @@ - simp { ; bulge remover: br { - max_coverage 1000000.0 - max_relative_coverage 1.5 ; bulge_cov < this * not_bulge_cov - parallel false + enabled true + max_relative_coverage 0.5 ; bulge_cov < this * not_bulge_cov + ; parallel false } ; complex bulge remover @@ -15,6 +14,12 @@ simp enabled false } + ; bulge remover: + final_br + { + enabled false + } + ; relative coverage erroneous component remover: rcc { diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/config.info b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/config.info similarity index 80% rename from src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/config.info rename to src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/config.info index df5179a..67b60e3 100644 --- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/config.info +++ b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/config.info @@ -11,13 +11,10 @@ K 55 ;FIXME introduce isolate mode mode base -;FIXME remove! -run_mode false -project_name TOY_DATASET -dataset ./configs/debruijn/datasets_archive/toy.info +dataset ./configs/debruijn/toy.info log_filename log.properties -output_base ./data/debruijn/ +output_base ./spades_output tmp_dir spades_tmp/ main_iteration true @@ -30,20 +27,24 @@ load_from latest/saves/ ; tmp or latest temp_bin_reads_dir .bin_reads/ max_threads 8 max_memory 120; in Gigabytes -buffer_size 512; in Megabytes +buffer_size 512; in Megabytes -entry_point construction +entry_point read_conversion +;entry_point construction ;entry_point simplification -;entry_point pacbio_aligning +;entry_point hybrid_aligning ;entry_point late_pair_info_count ;entry_point distance_estimation ;entry_point repeat_resolving +checkpoints none developer_mode true scaffold_correction_mode false ; enabled (1) or disabled (0) repeat resolution (former "paired_mode") rr_enable true +; 0 for graph N50 +min_edge_length_for_is_count 0 ;preserve raw paired index after distance estimation preserve_raw_paired_index false @@ -69,9 +70,6 @@ compute_paths_number false ; End of developer_mode parameters -; use unipaths as additional contigs instead of just graph edges -use_unipaths false - ;if true simple mismatches are corrected correct_mismatches true @@ -81,10 +79,6 @@ paired_info_statistics false ; set it true to get statistics for pair information (over gaps), such as false positive/negative, perfect match, etc. paired_info_scaffolder false -;FIXME is it always simple? 
-estimation_mode simple -; simple, weighted, extensive, smoothing - ;the only option left from repeat resolving max_repeat_length 8000 @@ -95,6 +89,12 @@ use_scaffolder true avoid_rc_connections true +calculate_coverage_for_each_lib false +strand_specificity { + ss_enabled false + antisense false +} + contig_output { contigs_name final_contigs scaffolds_name scaffolds @@ -120,8 +120,10 @@ gap_closer_enable true gap_closer { minimal_intersection 10 - before_simplify true - in_simplify false + + ;before_raw_simplify and before_simplify are mutually exclusive + before_raw_simplify true + before_simplify false after_simplify true weight_threshold 2.0 } @@ -133,20 +135,25 @@ kmer_coverage_model { coverage_threshold 10.0 } +; low covered edges remover +lcer +{ + lcer_enabled false + lcer_coverage_threshold 0.0 +} + pacbio_processor { + internal_length_cutoff 200 ;align and traverse. - pacbio_k 13 - additional_debug_info false compression_cutoff 0.6 - domination_cutoff 1.5 path_limit_stretching 1.3 path_limit_pressing 0.7 - ignore_middle_alignment true max_path_in_dijkstra 15000 max_vertex_in_dijkstra 2000 ;gap_closer long_seq_limit 400 + enable_gap_closing true pacbio_min_gap_quantity 2 contigs_min_gap_quantity 1 max_contigs_gap_length 10000 @@ -162,13 +169,20 @@ graph_read_corr bwa_aligner { - ;stupid naming since spades.py cannot change config normally - bwa_enable false debug false - path_to_bwa ./bin/bwa-spades min_contig_len 0 } ;flanking coverage range flanking_range 55 series_analysis "" +save_gp false + +ss_coverage_splitter { + enabled false + bin_size 50 + min_edge_len 200 + min_edge_coverage 5 + min_flanking_coverage 2 + coverage_margin 5 +} diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/construction.info b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/construction.info similarity index 88% rename from src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/construction.info rename to src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/construction.info index f3d1b2c..27bb9ac 100644 --- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/construction.info +++ b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/construction.info @@ -10,7 +10,10 @@ construction ; size of buffer for each thread in MB, 0 for autodetection read_buffer_size 0 - + + ; read median coverage threshold + read_cov_threshold 0 + early_tip_clipper { ; tip clipper can be enabled only in extension mode diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/detail_info_printer.info b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/detail_info_printer.info similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/detail_info_printer.info rename to src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/detail_info_printer.info diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/distance_estimation.info b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/distance_estimation.info similarity index 97% rename from src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/distance_estimation.info rename to src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/distance_estimation.info index 20954c6..22052fa 100644 --- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/distance_estimation.info +++ b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/distance_estimation.info @@ -40,7 +40,3 @@ amb_de { relative_length_threshold 0.8 relative_seq_threshold 0.5 } - -sensitive_mapper { - k 19 -} diff --git 
a/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/isolate_mode.info b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/isolate_mode.info new file mode 100644 index 0000000..32535af --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/isolate_mode.info @@ -0,0 +1,4 @@ +mode isolate + +#include "careful_mode.info" + diff --git a/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/large_genome_mode.info b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/large_genome_mode.info new file mode 100644 index 0000000..35ed27a --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/large_genome_mode.info @@ -0,0 +1,11 @@ +;FIXME do we still need this mode? +mode large_genome + +pe { + +debug_output false + +params { + scaffolding_mode old_pe_2015 +} +} diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/mda_mode.info b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/mda_mode.info similarity index 71% rename from src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/mda_mode.info rename to src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/mda_mode.info index 11c9815..6a68158 100644 --- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/mda_mode.info +++ b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/mda_mode.info @@ -39,7 +39,7 @@ simp rcc { enabled true - coverage_gap 20. + coverage_gap 10. max_length_coeff 2.0 max_length_with_tips_coeff 3.0 max_vertex_cnt 30 @@ -47,29 +47,6 @@ simp max_coverage_coeff 5.0 } - tec - { - max_ec_length_coefficient 55 ; max_ec_length = k + max_ec_length_coefficient - uniqueness_length 1500 - plausibility_length 200 - } - - ; topology and reliability based erroneous connection remover - trec - { - max_ec_length_coefficient 100 ; max_ec_length = k + max_ec_length_coefficient - uniqueness_length 1500 - unreliable_coverage 2.5 - } - - ; topology tip clipper: - ttc - { - length_coeff 3.5 - plausibility_length 250 - uniqueness_length 1500 - } - ; complex bulge remover cbr { @@ -82,7 +59,7 @@ simp enabled true uniqueness_length 1500 unreliability_threshold 0.2 - relative_threshold 5 + relative_threshold 5 } init_clean @@ -114,8 +91,7 @@ params { ; extension selection extension_options { - use_default_single_threshold false - single_threshold 0.001 + single_threshold 0.3 weight_threshold 0.6 max_repeat_length 8000 } diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/meta_mode.info b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/meta_mode.info similarity index 62% rename from src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/meta_mode.info rename to src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/meta_mode.info index 69c7bdc..6c556eb 100644 --- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/meta_mode.info +++ b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/meta_mode.info @@ -2,12 +2,11 @@ mode meta ; two-step pipeline two_step_rr true +min_edge_length_for_is_count 900 + ; enables/disables usage of intermediate contigs in two-step pipeline use_intermediate_contigs true -;if true simple mismatches are corrected -correct_mismatches false - ;flanking coverage range flanking_range 30 @@ -32,7 +31,7 @@ simp { ; rctc: tip_cov < rctc * not_tip_cov ; tc_lb: max_tip_length = max((min(k, read_length / 2) * tc_lb), read_length); - condition "{ tc_lb 3.5, , cb 1000000, rctc 2.0 } { tc_lb 6., cb 2.5, rctc 1.0 }" + condition "{ rl 0.2 } { rlmk 2., rctc 2.0 }" } ; relative coverage erroneous component remover: @@ -54,28 +53,29 @@ simp } ; relative 
edge disconnector: - relative_ed + red { enabled true - diff_mult 10. + diff_mult 5. + unconditional_diff_mult 50. } - ; bulge remover: - br - { - max_coverage 1000000.0 - max_relative_coverage 100000. ; bulge_cov < this * not_bulge_cov - max_delta 10 - max_relative_delta 0.1 + ; bulge remover: + br + { + enabled true + max_coverage 1000000.0 + max_relative_coverage 5. ; bulge_cov < this * not_bulge_cov + max_delta 10 + max_relative_delta 0.1 + dijkstra_vertex_limit 3000 parallel true - } + } ; final tip clipper: final_tc { - ; rctc: tip_cov < rctc * not_tip_cov - ; tc_lb: max_tip_length = max((min(k, read_length / 2) * tc_lb), read_length); - condition "{ lb 500, cb 3., rctc 0.7 } { lb 1500, cb 20., rctc 0.2 }" + condition "{ lb 500, rctc 0.4 } { lb 850, rctc 0.2 }" } ; final bulge remover: @@ -83,25 +83,24 @@ simp { enabled true main_iteration_only true - max_bulge_length_coefficient 50. ; max_bulge_length = max_bulge_length_coefficient * k + max_bulge_length_coefficient 30. ; max_bulge_length = max_bulge_length_coefficient * k max_coverage 1000000.0 max_relative_coverage 0.5 ; bulge_cov < this * not_bulge_cov - max_delta 50 + max_delta 45 max_relative_delta 0.1 } - ; second final bulge remover: - ; only in meta mode, inherits settings of final_br - second_final_br + ; complex bulge remover + cbr { - max_delta 1500 - max_number_edges 3 + enabled true } ; hidden ec remover her { - enabled true + ; TODO NB config used in special meta mode version (always enabled) + enabled false uniqueness_length 1500 unreliability_threshold -1. relative_threshold 3. @@ -109,43 +108,49 @@ simp init_clean { - early_it_only true - ier { - enabled true - } - tip_condition "{ tc_lb 3.5, cb 2.0 }" - ec_condition "{ ec_lb 10, cb 0.5 }" + activation_cov -1. + early_it_only false + ier + { + enabled true + } + ;Disable if it does not help the br performance much! + tip_condition "{ tc_lb 3.5, cb 2.1 }" + ;ec_condition is here only to speed-up future br on early iterations + ec_condition "{ ec_lb 10, cb 1.5 }" + disconnect_flank_cov -1. } } -;FIXME rename +;TODO rename preliminary_simp { init_clean { - self_conj_condition "{ ec_lb 100, cb 20.0 }" - early_it_only false - ier - { - enabled true - } - tip_condition "{ rlmk, cb 1.2, mmm 2 }" + tip_condition "loop 2 { rlmk 1., cb 1.2, mmm 2 } { rlmk 1., cb 1.2, mmm 0.05 } { rl 0.2, cb 1.2 }" ec_condition "{ ec_lb 0, cb 0.9 }" disconnect_flank_cov 0.8 } - post_simplif_enabled false + ; bulge remover: + br + { + enabled true + max_coverage 1000000.0 + max_relative_coverage 0.5 ; bulge_cov < this * not_bulge_cov + max_delta 10 + max_relative_delta 0.1 + } - ; bulge remover: - br - { - max_coverage 1000000.0 - max_relative_coverage 0.5 ; bulge_cov < this * not_bulge_cov - max_delta 10 - max_relative_delta 0.1 - } - + ; Currently will not work even if enabled. Left for experiments. + ; relative edge disconnector + red + { + enabled false + diff_mult 10. + unconditional_diff_mult 100. 
+ } } ; undo single cell config changes, enforce filtering @@ -169,8 +174,10 @@ long_reads { } params { - remove_overlaps true - cut_all_overlaps true + overlap_removal { + enabled true + cut_all true + } scaffolding_mode old_pe_2015 @@ -179,7 +186,6 @@ params { ; extension selection extension_options { - use_default_single_threshold true single_threshold 0.3 weight_threshold 0.6 priority_coeff 1.5 @@ -187,6 +193,12 @@ params { } use_coordinated_coverage true + + coordinated_coverage + { + min_path_len 10000 + } + } } @@ -195,6 +207,10 @@ prelim_pe { params { scaffolding_mode old + overlap_removal { + enabled false + } + use_coordinated_coverage false remove_overlaps false scaffolding2015 { diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/moleculo_mode.info b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/moleculo_mode.info similarity index 74% rename from src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/moleculo_mode.info rename to src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/moleculo_mode.info index a3ad118..aa1613c 100644 --- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/moleculo_mode.info +++ b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/moleculo_mode.info @@ -16,6 +16,7 @@ simp ; bulge remover: br { + enabled true max_coverage 3 max_relative_coverage 100000. ; bulge_cov < this * not_bulge_cov } @@ -41,29 +42,6 @@ simp max_coverage_coeff 5.0 } - tec - { - max_ec_length_coefficient 55 ; max_ec_length = k + max_ec_length_coefficient - uniqueness_length 1500 - plausibility_length 200 - } - - ; topology and reliability based erroneous connection remover - trec - { - max_ec_length_coefficient 100 ; max_ec_length = k + max_ec_length_coefficient - uniqueness_length 1500 - unreliable_coverage 2.5 - } - - ; topology tip clipper: - ttc - { - length_coeff 3.5 - plausibility_length 250 - uniqueness_length 1500 - } - ; complex bulge remover cbr { @@ -85,8 +63,6 @@ simp init_clean { - early_it_only true - activation_cov -1. 
ier { @@ -101,15 +77,18 @@ simp pe { params { normalize_weight true - cut_all_overlaps true + + overlap_removal { + enabled true + cut_all true + } scaffolding_mode old ; extension selection extension_options { - use_default_single_threshold false - single_threshold 0.001 + single_threshold 0.3 weight_threshold 0.6 } diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/pe_params.info b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/pe_params.info similarity index 81% rename from src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/pe_params.info rename to src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/pe_params.info index 0d7a172..1b37060 100644 --- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/pe_params.info +++ b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/pe_params.info @@ -18,17 +18,18 @@ params { multi_path_extend false ; old | 2015 | combined | old_pe_2015 scaffolding_mode old_pe_2015 + + overlap_removal { + enabled true + end_start_only false + cut_all false + } - remove_overlaps true - cut_all_overlaps false - - split_edge_length 99 normalize_weight true ; extension selection extension_options { - use_default_single_threshold true single_threshold 0.1 weight_threshold 0.5 priority_coeff 1.5 @@ -38,7 +39,6 @@ params { mate_pair_options { - use_default_single_threshold true single_threshold 30 weight_threshold 0.5 priority_coeff 1.5 @@ -50,7 +50,7 @@ params { enabled true cutoff 2 hard_cutoff 0 - rel_cutoff 0.1 + rel_cov_cutoff 0.0 sum_threshold 3 cluster_info true @@ -61,25 +61,20 @@ params { ;next param should be 0.51 - 1.0 if use_old_score = true and 3.0 otherwise min_gap_score 0.7 - max_must_overlap -2 - max_can_overlap 0.5 + max_can_overlap 1. short_overlap 6 artificial_gap 10 - use_old_score true min_overlap_length 10 - flank_addition_coefficient -5.9 - flank_multiplication_coefficient 0.97 + flank_multiplication_coefficient .5 + flank_addition_coefficient 5 var_coeff 3.0 basic_overlap_coeff 2.0 } - path_cleaning - { - enabled false - } - + path_cleaning_presets "" + use_coordinated_coverage false coordinated_coverage { @@ -88,6 +83,15 @@ params { min_path_len 1000 } + + simple_coverage_resolver { + enabled false + coverage_margin 2 + min_upper_coverage 5 + max_coverage_variation 5 + } + + scaffolding2015 { ; (median * (1+variation) > unique > median * (1 - variation)) relative_weight_cutoff 2.0 @@ -112,6 +116,9 @@ params { genome_consistency_checker { max_gap 1000 relative_max_gap 0.2 + use_main_storage true ; if set to true, next two parameters are set to min_unique_length + unresolvable_jump 1000 ; length of unresolvable repeats + unique_length 500 ; spelling genome in the alphabet of edges longer than this } uniqueness_analyser { @@ -160,5 +167,13 @@ long_reads { min_significant_overlap 200 } + rna_long_reads { + filtering 0.1 + weight_priority 1.1 + unique_edge_priority 2.0 + min_significant_overlap 0 + } + + } } diff --git a/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/plasmid_mode.info b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/plasmid_mode.info new file mode 100644 index 0000000..4306b32 --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/plasmid_mode.info @@ -0,0 +1,26 @@ +mode plasmid + +plasmid +{ +;isolated + long_edge_length 1000 + edge_length_for_median 10000 + relative_coverage 0.3 + small_component_size 10000 + small_component_relative_coverage 1.5 + min_component_length 10000 + min_isolated_length 1000 +;meta + meta_mode true + absolute_coverage_cutoff 5 
+;circular_removal + min_start_edge_length 2000 + min_start_coverage 20 + max_loop 150000 +; reference_removal replace this with path to reference and uncomment for reference based filtration +;iterative_coverage_elimination + iterative_coverage_elimination true + additive_step 5 + relative_step 1.3 + max_length 1000000 +} diff --git a/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/rna_mode.info b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/rna_mode.info new file mode 100644 index 0000000..6f7b5db --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/rna_mode.info @@ -0,0 +1,211 @@ +mode rna + +preserve_raw_paired_index true +min_edge_length_for_is_count 500 + +calculate_coverage_for_each_lib true +strand_specificity { + ss_enabled false + antisense false +} + +ss_coverage_splitter { + enabled true + bin_size 50 + min_edge_len 200 + min_edge_coverage 5 + min_flanking_coverage 2 + coverage_margin 5 +} + +pacbio_processor +{ + internal_length_cutoff 100 +;align and traverse. + compression_cutoff 0.6 + path_limit_stretching 1.3 + path_limit_pressing 0.7 + max_path_in_dijkstra 5000 + max_vertex_in_dijkstra 1000 +;gap_closer + long_seq_limit 100 + enable_gap_closing false + enable_fl_gap_closing true + pacbio_min_gap_quantity 2 + contigs_min_gap_quantity 1 + max_contigs_gap_length 10000 +} + +contig_output { + scaffolds_name transcripts + ; none --- do not output broken scaffolds | break_gaps --- break only by N steches | break_all --- break all with overlap < k + output_broken_scaffolds none +} + +simp +{ + ;all topology based erroneous connection removers are off + topology_simplif_enabled false + + tc + { + ; rctc: tip_cov < rctc * not_tip_cov + ; tc_lb: max_tip_length = max((min(k, read_length / 2) * tc_lb), read_length); + condition "{ mmm 3 tc_lb 4, cb 100000, rctc 0.5 } { tc_lb 2, cb 1, rctc 10000 }" + } + + dead_end + { + enabled true + condition "{ tc_lb 3.5, cb 2 }" + } + + ; bulge remover: + br + { + enabled true + max_additive_length_coefficient 100 + max_coverage 1000000.0 + max_relative_coverage 100000.0 ; bulge_cov < this * not_bulge_cov + } + + ; erroneous connections remover: + ec + { + ; ec_lb: max_ec_length = k + ec_lb + ; icb: iterative coverage bound + ; to_ec_lb: max_ec_length = 2*tip_length(to_ec_lb) - 1 + ; nbr: use not bulge erroneous connections remover + ; condition "{ ec_lb 9, icb 40.0, nbr }" + condition "{ ec_lb 30, icb 200, rcec_cb 1.0 }" + } + + ; relative coverage erroneous connections remover: + rcec + { + rcec_lb 30 + rcec_cb 1.0 + enabled true + } + + rcc + { + enabled true + coverage_gap 20. + } + + ; hidden ec remover + her + { + ; TODO NB config also used in special rna mode version (always enabled) + enabled false + uniqueness_length 1500 + unreliability_threshold 0.2 + relative_threshold 5 + } + + ier + { + enabled true + use_rl_for_max_length true ; max_length will be taken max with read_length + use_rl_for_max_length_any_cov false ; use_rl_for_max_length_any_cov will be taken max with read_length + max_length 80 + max_coverage 2 + max_length_any_cov 0 + rl_threshold_increase 2 ; add this value to read length if used, i.e. 
flags above are set + } + +} + +; disable filtering in rna mode +de +{ + raw_filter_threshold 0 +} + +pe { +debug_output true + +params { + multi_path_extend true + + scaffolding_mode old + + overlap_removal { + enabled false + end_start_only true + cut_all true + } + + extension_options + { + single_threshold 0.05 + } + + scaffolder { + cutoff 1 + hard_cutoff 5 + rel_cov_cutoff 0.1 + cluster_info false + min_overlap_for_rna_scaffolding 8 + } + + path_cleaning_presets "default soft hard" + ; All length cutoffs presented in nucleotides + ; So edges less than or equal to (relative cutoff * RL - K) or (absolute cutoff - K) will be deleted + path_cleaning + { + enabled true + min_length 110 + isolated_min_length 130 + isolated_min_cov 4 + min_length_for_low_covered 140 + rel_cutoff 1.3 + rel_isolated_cutoff 1.5 + rel_low_covered_cutoff 1.6 + min_coverage 2 + } + + ; All length cutoffs presented in nucleotides + hard_path_cleaning + { + enabled true + min_length 130 + isolated_min_length 180 + isolated_min_cov 8 + min_length_for_low_covered 180 + rel_cutoff 1.5 + rel_isolated_cutoff 2.0 + rel_low_covered_cutoff 2.0 + min_coverage 3 + } + + ; All length cutoffs presented in nucleotides + soft_path_cleaning + { + enabled true + min_length 85 + isolated_min_length 100 + isolated_min_cov 2 + min_length_for_low_covered 130 + rel_cutoff 1.05 + rel_isolated_cutoff 1.2 + rel_low_covered_cutoff 1.5 + min_coverage 1 + } + + use_coordinated_coverage false + coordinated_coverage { + max_edge_length_repeat 1000 + delta 0.5 + min_path_len 300 + } + + simple_coverage_resolver { + enabled true + coverage_margin 2 + min_upper_coverage 2 + max_coverage_variation 10 + } +} +} diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/simplification.info b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/simplification.info similarity index 71% rename from src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/simplification.info rename to src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/simplification.info index 3ee8e02..7bc37f5 100644 --- a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/simplification.info +++ b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/simplification.info @@ -2,15 +2,40 @@ simp { + ; ==== RAW SIMPLIFICATION ==== + init_clean + { + self_conj_condition "{ ec_lb 100, cb 1.0 }" + early_it_only false + ; will be enabled only if average coverage >= activate_cov + ; if value < 0 check not performed + activation_cov 10. + + ; isolated edges remover + ier + { + enabled true + use_rl_for_max_length false ; max_length will be taken max with read_length + use_rl_for_max_length_any_cov true ; use_rl_for_max_length_any_cov will be taken max with read_length + max_length 0 ; will be taken max with read_length if option above is set + max_coverage 0 + max_length_any_cov 0 ; will be taken max with read_length if option above is set + rl_threshold_increase 0 ; add this value to read length if used, i.e. 
flags above are set + } + + tip_condition "{ tc_lb 3.5, cb auto }" + ec_condition "{ ec_lb 10, cb 2.0 }" + + ; edges with flank cov around alternative less than value will be disconnected + ; negative value to disable + disconnect_flank_cov -1.0 + } + + ; ==== SIMPLIFICATION CYCLE ==== + ; number of iterations in basic simplification cycle cycle_iter_count 10 - ; enable advanced simplification algo - post_simplif_enabled true - - ; enable advanced ec removal algo - topology_simplif_enabled false - ; tip clipper: tc { @@ -20,23 +45,24 @@ simp condition "{ tc_lb 3.5, cb 1000000, rctc 2.0 } { tc_lb 10., cb auto }" } - ; bulge remover: - br - { - enabled true + ; bulge remover: + br + { + enabled true main_iteration_only false - max_bulge_length_coefficient 3. ; max_bulge_length = max_bulge_length_coefficient * k - max_additive_length_coefficient 100 - max_coverage 1000.0 - max_relative_coverage 1.1 ; bulge_cov < this * not_bulge_cov - max_delta 3 - max_relative_delta 0.1 + max_bulge_length_coefficient 3. ; max_bulge_length = max_bulge_length_coefficient * k + max_additive_length_coefficient 100 + max_coverage 1000.0 + max_relative_coverage 1.1 ; bulge_cov < this * not_bulge_cov + max_delta 3 + max_relative_delta 0.1 max_number_edges 1000 + dijkstra_vertex_limit 3000 parallel true buff_size 10000 buff_cov_diff 2. buff_cov_rel_diff 0.2 - } + } ; erroneous connections remover: ec @@ -48,13 +74,20 @@ simp ; condition "{ ec_lb 9, icb 40.0 }" } + dead_end { + enabled false + condition "" + } + + ; ==== POST-SIMPLIFICATION ==== + ; relative coverage erroneous connections remover: rcec - { - enabled false - rcec_lb 30 - rcec_cb 0.5 - } + { + enabled false + rcec_lb 30 + rcec_cb 0.5 + } ; relative coverage erroneous component remover: rcc @@ -69,11 +102,12 @@ simp } ; relative edge disconnector: - relative_ed + red { enabled false diff_mult 20. edge_sum 10000 + unconditional_diff_mult 0. ; 0. to disable } ; final tip clipper: @@ -94,17 +128,60 @@ simp max_delta 3 max_relative_delta 0.1 max_number_edges 1000 + dijkstra_vertex_limit 3000 parallel true buff_size 10000 buff_cov_diff 2. buff_cov_rel_diff 0.2 } + + ; complex tip clipper + complex_tc + { + enabled false + max_relative_coverage -1 + max_edge_len 100 + condition "{ tc_lb 3.5 }" + } + + ; complex bulge remover + cbr + { + enabled false + max_relative_length 5. + max_length_difference 5 + } + + ; isolated edges remover + ier + { + enabled true + use_rl_for_max_length false ; max_length will be taken max with read_length + use_rl_for_max_length_any_cov true ; use_rl_for_max_length_any_cov will be taken max with read_length + max_length 0 ; will be taken max with read_length if option above is set + max_coverage 2 + max_length_any_cov 150 ; will be taken max with read_length if option above is set + rl_threshold_increase 0 ; add this value to read length if used, i.e. 
flags above are set + } + + ; hidden ec remover + her + { + enabled false + uniqueness_length 1500 + unreliability_threshold 4 + relative_threshold 5 + } + + ; ==== ADVANCED EC REMOVAL ALGO ==== + ; enable advanced ec removal algo + topology_simplif_enabled false ; topology based erroneous connection remover tec { max_ec_length_coefficient 55 ; max_ec_length = k + max_ec_length_coefficient - uniqueness_length 5000 + uniqueness_length 1500 plausibility_length 200 } @@ -132,22 +209,7 @@ simp uniqueness_length 1500 plausibility_length 200 } - - piec - { - max_ec_length_coefficient 30 ; max_ec_length = k + max_ec_length_coefficient - min_neighbour_length 100 - } - ; isolated edges remover - ier - { - enabled true - max_length 0 - max_coverage 2 - max_length_any_cov 150 ; will be taken max with read_length - } - ; topology tip clipper: ttc { @@ -156,58 +218,4 @@ simp uniqueness_length 1500 } - ; complex tip clipper - complex_tc - { - enabled false - max_relative_coverage -1 - max_edge_len 100 - condition "{ tc_lb 3.5 }" - } - - ; complex bulge remover - cbr - { - enabled false - max_relative_length 5. - max_length_difference 5 - } - - ; hidden ec remover - her - { - enabled false - uniqueness_length 1500 - unreliability_threshold 4 - relative_threshold 5 - } - - init_clean - { - self_conj_condition "{ ec_lb 100, cb 1.0 }" - early_it_only false - ; will be enabled only if average coverage \leq activate_cov - activation_cov 10. - - ; isolated edges remover - ier - { - enabled true - max_length 0 - max_coverage 0 - max_length_any_cov 0 ; will be taken max with read_length - } - - tip_condition "{ tc_lb 3.5, cb auto }" - ec_condition "{ ec_lb 10, cb 2.0 }" - ; edges with flank cov around alternative less than value will be disconnected - ; negative value to disable - disconnect_flank_cov -1.0 - } - - dead_end { - enabled false - condition "" - } - } diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/toy.info b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/toy.info similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/datasets_archive/toy.info rename to src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/toy.info diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/tsa.info b/src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/tsa.info similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/configs/debruijn/tsa.info rename to src/SPAdes-3.14.0-Linux/share/spades/configs/debruijn/tsa.info diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/hammer/config.info b/src/SPAdes-3.14.0-Linux/share/spades/configs/hammer/config.info similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/configs/hammer/config.info rename to src/SPAdes-3.14.0-Linux/share/spades/configs/hammer/config.info diff --git a/src/SPAdes-3.10.1-Linux/share/spades/configs/ionhammer/ionhammer.cfg b/src/SPAdes-3.14.0-Linux/share/spades/configs/ionhammer/ionhammer.cfg similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/configs/ionhammer/ionhammer.cfg rename to src/SPAdes-3.14.0-Linux/share/spades/configs/ionhammer/ionhammer.cfg diff --git a/src/SPAdes-3.10.1-Linux/share/spades/joblib2/__init__.py b/src/SPAdes-3.14.0-Linux/share/spades/joblib2/__init__.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/joblib2/__init__.py rename to src/SPAdes-3.14.0-Linux/share/spades/joblib2/__init__.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/joblib2/disk.py 
b/src/SPAdes-3.14.0-Linux/share/spades/joblib2/disk.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/joblib2/disk.py rename to src/SPAdes-3.14.0-Linux/share/spades/joblib2/disk.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/joblib2/format_stack.py b/src/SPAdes-3.14.0-Linux/share/spades/joblib2/format_stack.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/joblib2/format_stack.py rename to src/SPAdes-3.14.0-Linux/share/spades/joblib2/format_stack.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/joblib2/func_inspect.py b/src/SPAdes-3.14.0-Linux/share/spades/joblib2/func_inspect.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/joblib2/func_inspect.py rename to src/SPAdes-3.14.0-Linux/share/spades/joblib2/func_inspect.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/joblib2/functools.py b/src/SPAdes-3.14.0-Linux/share/spades/joblib2/functools.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/joblib2/functools.py rename to src/SPAdes-3.14.0-Linux/share/spades/joblib2/functools.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/joblib2/hashing.py b/src/SPAdes-3.14.0-Linux/share/spades/joblib2/hashing.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/joblib2/hashing.py rename to src/SPAdes-3.14.0-Linux/share/spades/joblib2/hashing.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/joblib2/logger.py b/src/SPAdes-3.14.0-Linux/share/spades/joblib2/logger.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/joblib2/logger.py rename to src/SPAdes-3.14.0-Linux/share/spades/joblib2/logger.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/joblib2/memory.py b/src/SPAdes-3.14.0-Linux/share/spades/joblib2/memory.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/joblib2/memory.py rename to src/SPAdes-3.14.0-Linux/share/spades/joblib2/memory.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/joblib2/my_exceptions.py b/src/SPAdes-3.14.0-Linux/share/spades/joblib2/my_exceptions.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/joblib2/my_exceptions.py rename to src/SPAdes-3.14.0-Linux/share/spades/joblib2/my_exceptions.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/joblib2/numpy_pickle.py b/src/SPAdes-3.14.0-Linux/share/spades/joblib2/numpy_pickle.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/joblib2/numpy_pickle.py rename to src/SPAdes-3.14.0-Linux/share/spades/joblib2/numpy_pickle.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/joblib2/parallel.py b/src/SPAdes-3.14.0-Linux/share/spades/joblib2/parallel.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/joblib2/parallel.py rename to src/SPAdes-3.14.0-Linux/share/spades/joblib2/parallel.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/joblib2/testing.py b/src/SPAdes-3.14.0-Linux/share/spades/joblib2/testing.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/joblib2/testing.py rename to src/SPAdes-3.14.0-Linux/share/spades/joblib2/testing.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/joblib3/__init__.py b/src/SPAdes-3.14.0-Linux/share/spades/joblib3/__init__.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/joblib3/__init__.py rename to src/SPAdes-3.14.0-Linux/share/spades/joblib3/__init__.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/joblib3/_compat.py b/src/SPAdes-3.14.0-Linux/share/spades/joblib3/_compat.py similarity index 
100% rename from src/SPAdes-3.10.1-Linux/share/spades/joblib3/_compat.py rename to src/SPAdes-3.14.0-Linux/share/spades/joblib3/_compat.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/joblib3/_memory_helpers.py b/src/SPAdes-3.14.0-Linux/share/spades/joblib3/_memory_helpers.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/joblib3/_memory_helpers.py rename to src/SPAdes-3.14.0-Linux/share/spades/joblib3/_memory_helpers.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/joblib3/_multiprocessing_helpers.py b/src/SPAdes-3.14.0-Linux/share/spades/joblib3/_multiprocessing_helpers.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/joblib3/_multiprocessing_helpers.py rename to src/SPAdes-3.14.0-Linux/share/spades/joblib3/_multiprocessing_helpers.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/joblib3/disk.py b/src/SPAdes-3.14.0-Linux/share/spades/joblib3/disk.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/joblib3/disk.py rename to src/SPAdes-3.14.0-Linux/share/spades/joblib3/disk.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/joblib3/format_stack.py b/src/SPAdes-3.14.0-Linux/share/spades/joblib3/format_stack.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/joblib3/format_stack.py rename to src/SPAdes-3.14.0-Linux/share/spades/joblib3/format_stack.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/joblib3/func_inspect.py b/src/SPAdes-3.14.0-Linux/share/spades/joblib3/func_inspect.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/joblib3/func_inspect.py rename to src/SPAdes-3.14.0-Linux/share/spades/joblib3/func_inspect.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/joblib3/hashing.py b/src/SPAdes-3.14.0-Linux/share/spades/joblib3/hashing.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/joblib3/hashing.py rename to src/SPAdes-3.14.0-Linux/share/spades/joblib3/hashing.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/joblib3/logger.py b/src/SPAdes-3.14.0-Linux/share/spades/joblib3/logger.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/joblib3/logger.py rename to src/SPAdes-3.14.0-Linux/share/spades/joblib3/logger.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/joblib3/memory.py b/src/SPAdes-3.14.0-Linux/share/spades/joblib3/memory.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/joblib3/memory.py rename to src/SPAdes-3.14.0-Linux/share/spades/joblib3/memory.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/joblib3/my_exceptions.py b/src/SPAdes-3.14.0-Linux/share/spades/joblib3/my_exceptions.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/joblib3/my_exceptions.py rename to src/SPAdes-3.14.0-Linux/share/spades/joblib3/my_exceptions.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/joblib3/numpy_pickle.py b/src/SPAdes-3.14.0-Linux/share/spades/joblib3/numpy_pickle.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/joblib3/numpy_pickle.py rename to src/SPAdes-3.14.0-Linux/share/spades/joblib3/numpy_pickle.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/joblib3/parallel.py b/src/SPAdes-3.14.0-Linux/share/spades/joblib3/parallel.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/joblib3/parallel.py rename to src/SPAdes-3.14.0-Linux/share/spades/joblib3/parallel.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/joblib3/pool.py b/src/SPAdes-3.14.0-Linux/share/spades/joblib3/pool.py similarity 
index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/joblib3/pool.py rename to src/SPAdes-3.14.0-Linux/share/spades/joblib3/pool.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/joblib3/testing.py b/src/SPAdes-3.14.0-Linux/share/spades/joblib3/testing.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/joblib3/testing.py rename to src/SPAdes-3.14.0-Linux/share/spades/joblib3/testing.py diff --git a/src/SPAdes-3.14.0-Linux/share/spades/manual.html b/src/SPAdes-3.14.0-Linux/share/spades/manual.html new file mode 100644 index 0000000..6aacf5a --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/manual.html @@ -0,0 +1,815 @@ + SPAdes 3.13.1 Manual

    SPAdes 3.14.0 Manual

    1. About SPAdes
        1.1. Supported data types
        1.2. SPAdes pipeline
        1.3. SPAdes performance
    2. Installation
        2.1. Downloading SPAdes Linux binaries
        2.2. Downloading SPAdes binaries for Mac
        2.3. Downloading and compiling SPAdes source code
        2.4. Verifying your installation
    3. Running SPAdes
        3.1. SPAdes input
        3.2. SPAdes command line options
        3.3. Assembling IonTorrent reads
        3.4. Assembling long Illumina paired reads (2x150 and 2x250)
        3.5. SPAdes output
        3.6. plasmidSPAdes output
        3.7. biosyntheticSPAdes output
        3.8. Assembly evaluation
    4. Stand-alone binaries released within SPAdes package
        4.1. k-mer counting
        4.2. k-mer coverage read filter
        4.3. k-mer cardinality estimating
        4.4. Graph construction
        4.5. Long read to graph alignment
            4.5.1. hybridSPAdes aligner
            4.5.2. SPAligner
    5. Citation
    6. Feedback and bug reports

    +

    About SPAdes

    +

    SPAdes – St. Petersburg genome assembler – is an assembly toolkit containing various assembly pipelines. This manual will help you to install and run SPAdes. SPAdes version 3.14.0 was released under GPLv2 on December 27, 2019 and can be downloaded from http://cab.spbu.ru/software/spades/.

    +

    +

    Supported data types

    +

    The current version of SPAdes works with Illumina or IonTorrent reads and is capable of providing hybrid assemblies using PacBio, Oxford Nanopore and Sanger reads. You can also provide additional contigs that will be used as long reads.

    +

    Version 3.14.0 of SPAdes supports paired-end reads, mate-pairs and unpaired reads. SPAdes can take as input several paired-end and mate-pair libraries simultaneously. Note that SPAdes was initially designed for small genomes: it was tested on bacterial (both single-cell MDA and standard isolates), fungal and other small genomes. SPAdes is not intended for larger genomes (e.g. mammalian-size genomes); if you use it for such purposes, you do so at your own risk.

    +

    If you have high-coverage data for a bacterial/viral isolate or a multi-cell organism, we highly recommend using the --isolate option.
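    As an illustrative sketch only (read file names and the output directory are placeholders, not part of the manual), a minimal isolate run might look like this:

        spades.py --isolate -1 R1.fastq.gz -2 R2.fastq.gz -o spades_isolate_output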

    +

    SPAdes 3.14.0 includes the following additional pipelines:

    +
    • metaSPAdes – a pipeline for metagenomic data sets (see metaSPAdes options).
    • plasmidSPAdes – a pipeline for extracting and assembling plasmids from WGS data sets (see plasmidSPAdes options).
    • rnaSPAdes – a de novo transcriptome assembler from RNA-Seq data (see rnaSPAdes manual).
    • truSPAdes – a module for TruSeq barcode assembly (see truSPAdes manual).
    • biosyntheticSPAdes – a module for biosynthetic gene cluster assembly with paired-end reads (see biosyntheticSPAdes options).
    +

    In addition, we provide several stand-alone binaries with relatively simple command-line interfaces: k-mer counting (spades-kmercounter), assembly graph construction (spades-gbuilder) and long read to graph aligner (spades-gmapper). To learn the options of these tools, you can either run them without any parameters or read this section.

    +

    +

    +

    SPAdes pipeline

    +

    SPAdes comes in several separate modules:

    +
    • BayesHammer – read error correction tool for Illumina reads, which works well on both single-cell and standard data sets.
    • IonHammer – read error correction tool for IonTorrent data, which also works on both types of data.
    • SPAdes – iterative short-read genome assembly module; values of K are selected automatically based on the read length and data set type.
    • MismatchCorrector – a tool which improves mismatch and short indel rates in resulting contigs and scaffolds; this module uses the BWA tool [Li H. and Durbin R., 2009]; MismatchCorrector is turned off by default, but we recommend turning it on (see SPAdes options section).

    We recommend running SPAdes with BayesHammer/IonHammer to obtain high-quality assemblies. However, if you use your own read correction tool, it is possible to turn the error correction module off. It is also possible to use only the read error correction stage if you wish to use another assembler. See the SPAdes options section.
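    For example (a sketch with placeholder file names), the two partial pipelines are selected as follows:

        # correct reads only, e.g. before running a different assembler
        spades.py --only-error-correction -1 R1.fastq.gz -2 R2.fastq.gz -o ec_output

        # assemble only, assuming the reads were already corrected elsewhere
        spades.py --only-assembler -1 corrected_R1.fastq.gz -2 corrected_R2.fastq.gz -o asm_output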

    +

    +

    SPAdes performance

    +

    In this section we give approximate data about SPAdes performance on two data sets:

    • an E. coli isolate data set
    • an E. coli single-cell data set

    We ran SPAdes with default parameters using 16 threads on a server with Intel Xeon 2.27GHz processors. BayesHammer runs in approximately half an hour and takes up to 8Gb of RAM to perform read error correction on each data set. Assembly takes about 10 minutes for the E. coli isolate data set and 20 minutes for the E. coli single-cell data set. Both data sets require about 8Gb of RAM (see notes below). MismatchCorrector runs for about 15 minutes on both data sets, and requires less than 2Gb of RAM. All modules also require additional disk space for storing results (corrected reads, contigs, etc) and temporary files. See the table below for more precise values.

    Stage             |        E. coli isolate               |      E. coli single-cell
                      | Time | Peak RAM   | Additional disk  | Time | Peak RAM   | Additional disk
                      |      | usage (Gb) | space (Gb)       |      | usage (Gb) | space (Gb)
    ------------------+------+------------+------------------+------+------------+-----------------
    BayesHammer       | 24m  | 7.8        | 8.5              | 25m  | 7.7        | 8.6
    SPAdes            | 8m   | 8.4        | 1.4              | 10m  | 8.3        | 2.1
    MismatchCorrector | 10m  | 1.7        | 21.4             | 12m  | 1.8        | 22.4
    Whole pipeline    | 42m  | 8.4        | 23.9             | 47m  | 8.3        | 25.1

    Notes:

    +
    • Running SPAdes without preliminary read error correction (e.g. without BayesHammer or IonHammer) will likely require more time and memory.
    • Each module removes its temporary files as soon as it finishes.
    • SPAdes uses 512 Mb per thread for buffers, which results in higher memory consumption. If you set the memory limit manually, SPAdes will use smaller buffers and thus less RAM.
    • Performance statistics are given for SPAdes version 3.14.0.

    +

    Installation

    +

    SPAdes requires a 64-bit Linux system or Mac OS and Python (supported versions are Python2: 2.4–2.7, and Python3: 3.2 and higher) to be pre-installed on it. To obtain SPAdes you can either download binaries or download source code and compile it yourself.

    +

    In case of successful installation the following files will be placed in the bin directory:

    +
    • spades.py (main executable script)
    • metaspades.py (main executable script for metaSPAdes)
    • plasmidspades.py (main executable script for plasmidSPAdes)
    • rnaspades.py (main executable script for rnaSPAdes)
    • truspades.py (main executable script for truSPAdes)
    • spades-core (assembly module)
    • spades-gbuilder (standalone graph builder application)
    • spades-gmapper (standalone long read to graph aligner)
    • spades-kmercount (standalone k-mer counting application)
    • spades-hammer (read error correcting module for Illumina reads)
    • spades-ionhammer (read error correcting module for IonTorrent reads)
    • spades-bwa (BWA alignment module which is required for mismatch correction)
    • spades-corrector-core (mismatch correction module)
    • spades-truseq-scfcorrection (executable used in truSPAdes pipeline)

    +

    Downloading SPAdes Linux binaries

    +

    To download SPAdes Linux binaries and extract them, go to the directory in which you wish SPAdes to be installed and run:

    +
        wget http://cab.spbu.ru/files/release3.14.0/SPAdes-3.14.0-Linux.tar.gz
        tar -xzf SPAdes-3.14.0-Linux.tar.gz
        cd SPAdes-3.14.0-Linux/bin/
    +

    In this case you do not need to run any installation scripts – SPAdes is ready to use. We also suggest adding SPAdes installation directory to the PATH variable.

    +

    Note that pre-built binaries do not work on new Linux kernels.

    +

    +

    Downloading SPAdes binaries for Mac

    +

    To obtain SPAdes binaries for Mac, go to the directory in which you wish SPAdes to be installed and run:

    +
        curl http://cab.spbu.ru/files/release3.14.0/SPAdes-3.14.0-Darwin.tar.gz -o SPAdes-3.14.0-Darwin.tar.gz
        tar -zxf SPAdes-3.14.0-Darwin.tar.gz
        cd SPAdes-3.14.0-Darwin/bin/
    +

    Just as in Linux, SPAdes is ready to use and no further installation steps are required. We also suggest adding SPAdes installation directory to the PATH variable.

    +

    +

    Downloading and compiling SPAdes source code

    +

    If you wish to compile SPAdes by yourself you will need the following libraries to be pre-installed:

    +
    • g++ (version 5.3.1 or higher)
    • cmake (version 2.8.12 or higher)
    • zlib
    • libbz2

    If you meet these requirements, you can download the SPAdes source code:

    +
        wget http://cab.spbu.ru/files/release3.14.0/SPAdes-3.14.0.tar.gz
        tar -xzf SPAdes-3.14.0.tar.gz
        cd SPAdes-3.14.0
    +

    and build it with the following script:

    +
        ./spades_compile.sh
    +

    SPAdes will be built in the directory ./bin. If you wish to install SPAdes into another directory, you can specify the full path of the destination folder by running the following command in bash or sh:

    +
        PREFIX=<destination_dir> ./spades_compile.sh
    +

    for example:

    +
        PREFIX=/usr/local ./spades_compile.sh
    +

    which will install SPAdes into /usr/local/bin.

    +

    After installation you will get the same files (listed above) in ./bin directory (or <destination_dir>/bin if you specified PREFIX). We also suggest adding SPAdes installation directory to the PATH variable.

    +

    +

    Verifying your installation

    +

    For testing purposes, SPAdes comes with a toy data set (reads that align to the first 1000 bp of E. coli). To try SPAdes on this data set, run:

    +
        <spades installation dir>/spades.py --test
    +

    If you added SPAdes installation directory to the PATH variable, you can run:

    +
        spades.py --test
    +

    For simplicity, we further assume that the SPAdes installation directory is added to the PATH variable.

    +

    If the installation is successful, you will find the following information at the end of the log:

    +
    
    +===== Assembling finished. Used k-mer sizes: 21, 33, 55
    +
    + * Corrected reads are in spades_test/corrected/
    + * Assembled contigs are in spades_test/contigs.fasta
    + * Assembled scaffolds are in spades_test/scaffolds.fasta
    + * Assembly graph is in spades_test/assembly_graph.fastg
    + * Assembly graph in GFA format is in spades_test/assembly_graph.gfa
    + * Paths in the assembly graph corresponding to the contigs are in spades_test/contigs.paths
    + * Paths in the assembly graph corresponding to the scaffolds are in spades_test/scaffolds.paths
    +
    +======= SPAdes pipeline finished.
    +
    +========= TEST PASSED CORRECTLY.
    +
    +SPAdes log can be found here: spades_test/spades.log
    +
    +Thank you for using SPAdes!
    +
    +

    +

    Running SPAdes

    +

    +

    SPAdes input

    +

    SPAdes takes as input paired-end reads, mate-pairs and single (unpaired) reads in FASTA and FASTQ. For IonTorrent data SPAdes also supports unpaired reads in unmapped BAM format (like the one produced by Torrent Server). However, in order to run read error correction, reads should be in FASTQ or BAM format. Sanger, Oxford Nanopore and PacBio CLR reads can be provided in both formats since SPAdes does not run error correction for these types of data.

    +

    To run SPAdes 3.14.0 you need at least one library of the following types:

    +
    • Illumina paired-end/high-quality mate-pairs/unpaired reads
    • IonTorrent paired-end/high-quality mate-pairs/unpaired reads
    • PacBio CCS reads

    Illumina and IonTorrent libraries should not be assembled together. All other types of input data are compatible. SPAdes should not be used if only PacBio CLR, Oxford Nanopore, Sanger reads or additional contigs are available.

    +

    SPAdes supports mate-pair only assembly. However, we recommend using only high-quality mate-pair libraries in this case (i.e. libraries that do not have a paired-end part). We tested the mate-pair only pipeline using Illumina Nextera mate-pairs. See more here.

    +

    The current version of SPAdes also supports Lucigen NxSeq® Long Mate Pair libraries, which always have forward-reverse orientation. If you wish to use Lucigen NxSeq® Long Mate Pair reads, you will need the Python regex library to be pre-installed on your machine. You can install it with the Python pip installer:

    +
        pip install regex
    +

    or with the Easy Install Python module:

    +
        easy_install regex
    +

    Notes:

    +
    • It is strongly suggested to provide multiple paired-end and mate-pair libraries according to their insert size (from smallest to longest).
    • It is not recommended to run SPAdes on PacBio reads with low coverage (less than 5).
    • We suggest not running SPAdes on PacBio reads for large genomes.
    • SPAdes accepts gzip-compressed files.

    Read-pair libraries

    +

    By using the command line interface, you can specify up to nine different paired-end libraries, up to nine mate-pair libraries and also up to nine high-quality mate-pair ones. If you wish to use more, you can use a YAML data set file, as sketched below. We further refer to paired-end and mate-pair libraries simply as read-pair libraries.
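    As a sketch of such a file (all paths are placeholders; see the YAML data set section of the full manual for the exact schema), a data set with one paired-end and one mate-pair library could be written and passed to SPAdes via the --dataset option as follows:

        # write a hypothetical two-library data set file
        cat > dataset.yaml <<'EOF'
        [
          {
            orientation: "fr",
            type: "paired-end",
            left reads: ["/data/lib_pe_R1.fastq"],
            right reads: ["/data/lib_pe_R2.fastq"]
          },
          {
            orientation: "rf",
            type: "mate-pairs",
            left reads: ["/data/lib_mp_R1.fastq"],
            right reads: ["/data/lib_mp_R2.fastq"]
          }
        ]
        EOF
        spades.py --dataset dataset.yaml -o spades_output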

    +

    By default, SPAdes assumes that paired-end and high-quality mate-pair reads have forward-reverse (fr) orientation and usual mate-pairs have reverse-forward (rf) orientation. However, different orientations can be set for any library by using SPAdes options.

    +

    To distinguish reads in pairs we refer to them as left and right reads. For forward-reverse orientation, the forward reads correspond to the left reads and the reverse reads, to the right. Similarly, in reverse-forward orientation left and right reads correspond to reverse and forward reads, respectively, etc.

    +

    Each read-pair library can be stored in several files or several pairs of files. Paired reads can be organized in two different ways:

    +
    • In file pairs. In this case left and right reads are placed in different files and go in the same order in their respective files.
    • In interleaved files. In this case, the reads are interlaced, so that each right read goes after the corresponding paired left read.

    For example, Illumina produces paired-end reads in two files: R1.fastq and R2.fastq. If you choose to store reads in file pairs make sure that for every read from R1.fastq the corresponding paired read from R2.fastq is placed in the respective paired file on the same line number. If you choose to use interleaved files, every read from R1.fastq should be followed by the corresponding paired read from R2.fastq.
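    For instance (placeholder file names), the same paired-end library can be passed either way:

        # left and right reads in a pair of files
        spades.py -1 R1.fastq -2 R2.fastq -o spades_output

        # the same reads as a single interlaced file
        spades.py --12 interlaced.fastq -o spades_output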

    +

    If adapter and/or quality trimming software has been used prior to assembly, files with the orphan reads can be provided as "single read files" for the corresponding read-pair library.

    +


    +If you have merged some of the reads from your paired-end (not mate-pair or high-quality mate-pair) library (using tools such as BBMerge or STORM), you should provide the file with the resulting reads as a "merged read file" for the corresponding library.
    +Note that non-empty files with the remaining unmerged left/right reads (separate or interlaced) must be provided for the same library (for SPAdes to correctly detect the original read length).

    +

    In the unlikely case that some of the reads from your mate-pair (or high-quality mate-pair) library are "merged", you should provide the resulting reads as a SEPARATE single-read library.

    +

    Unpaired (single-read) libraries

    +

    By using the command line interface, you can specify up to nine different single-read libraries. To input more libraries, you can use a YAML data set file.

    +

    Single-read libraries are assumed to have high quality and reasonable coverage. For example, you can provide PacBio CCS reads as a single-read library.

    +

    Note that you should not specify PacBio CLR reads, Sanger reads or additional contigs as single-read libraries; each of them has a separate option.

    +

    +

    PacBio and Oxford Nanopore reads

    +

    SPAdes can take as input an unlimited number of PacBio and Oxford Nanopore libraries.

    +

    PacBio CLR and Oxford Nanopore reads are used for hybrid assemblies (e.g. with Illumina or IonTorrent). There is no need to pre-correct this kind of data. SPAdes will use PacBio CLR and Oxford Nanopore reads for gap closure and repeat resolution.

    +

    For PacBio you just need filtered subreads in FASTQ/FASTA format. Provide these filtered subreads using the --pacbio option. Oxford Nanopore reads are provided with the --nanopore option.
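    A hybrid run, sketched with placeholder file names, might look like:

        # Illumina paired-end reads plus Oxford Nanopore long reads
        spades.py -1 R1.fastq.gz -2 R2.fastq.gz --nanopore ont_reads.fastq.gz -o hybrid_output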

    +

    PacBio CCS/Reads of Insert reads or pre-corrected (using third-party software) PacBio CLR / Oxford Nanopore reads can be simply provided as single reads to SPAdes.

    +

    Additional contigs

    +

    In case you have contigs of the same genome generated by other assembler(s) and you wish to merge them into the SPAdes assembly, you can specify additional contigs using --trusted-contigs or --untrusted-contigs. The first option is used when high-quality contigs are available; these contigs will be used for graph construction, gap closure and repeat resolution. The second option is used for less reliable contigs that may have more errors, or contigs of unknown quality; these contigs will be used only for gap closure and repeat resolution. The number of additional contigs is unlimited.
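    A sketch of such a run (placeholder file names):

        # reuse high-quality contigs from a previous assembly of the same genome
        spades.py -1 R1.fastq.gz -2 R2.fastq.gz --trusted-contigs previous_contigs.fasta -o spades_output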

    +

    Note that SPAdes does not perform assembly using genomes of closely related species. Only contigs of the same genome should be specified.

    +


    +

    +

    SPAdes command line options

    +

    To run SPAdes from the command line, type

    +
        spades.py [options] -o <output_dir>
    +

    Note that we assume that the SPAdes installation directory is added to the PATH variable (otherwise, provide the full path to the SPAdes executable: <spades installation dir>/spades.py).

    +

    +

    Basic options

    +

    -o <output_dir>
    +    Specify the output directory. Required option.

    +

    +


    +--isolate
    +    This flag is highly recommended for high-coverage isolate and multi-cell data; improves the assembly quality and running time.
    +Not compatible with --only-error-correction or --careful options.

    +


    +--sc
    +    This flag is required for MDA (single-cell) data.

    +

    +


    +--meta   (same as metaspades.py)
    +    This flag is recommended when assembling metagenomic data sets (runs metaSPAdes, see paper for more details). Currently metaSPAdes supports only a single short-read library, which has to be paired-end (we hope to remove this restriction soon). In addition, you can provide long reads (e.g. using the --pacbio or --nanopore options), but hybrid assembly for metagenomes remains an experimental pipeline and optimal performance is not guaranteed. It does not support careful mode (mismatch correction is not available). In addition, you cannot specify a coverage cutoff for metaSPAdes. Note that metaSPAdes might be very sensitive to the presence of technical sequences remaining in the data (most notably adapter readthroughs); please run quality control and pre-process your data accordingly.
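    A metagenomic run over a single paired-end library, sketched with placeholder file names:

        metaspades.py -1 meta_R1.fastq.gz -2 meta_R2.fastq.gz -o metaspades_output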

    +

    +


    +--plasmid   (same as plasmidspades.py)
    +    This flag is required when assembling only plasmids from WGS data sets (runs plasmidSPAdes, see paper for the algorithm details). Note that plasmidSPAdes is not compatible with metaSPAdes and single-cell mode. Additionally, we do not recommend running plasmidSPAdes on more than one library. See section 3.6 for plasmidSPAdes output details.
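    A plasmid-only run, sketched with placeholder file names:

        plasmidspades.py -1 R1.fastq.gz -2 R2.fastq.gz -o plasmidspades_output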

    +

    +


    +--bio
    +    This flag is required when assembling only non-ribosomal and polyketide gene clusters from WGS data sets (runs biosyntheticSPAdes, see paper for the algorithm details). biosyntheticSPAdes is supposed to work on isolate or metagenomic WGS data sets. Note that biosyntheticSPAdes is not compatible with any other modes. See section 3.7 for biosyntheticSPAdes output details.

    +

    +


    --rna   (same as rnaspades.py)
        This flag should be used when assembling RNA-Seq data sets (runs rnaSPAdes). To learn more, see the rnaSPAdes manual.
        Not compatible with --only-error-correction or --careful options.

    --iontorrent
        This flag is required when assembling IonTorrent data. Allows BAM files as input. Carefully read section 3.3 before using this option.

    --test
        Runs SPAdes on the toy data set; see section 2.4.

    -h (or --help)
        Prints help.

    -v (or --version)
        Prints SPAdes version.

    Pipeline options

    --only-error-correction
        Performs read error correction only.

    --only-assembler
        Runs assembly module only.


    --careful
        Tries to reduce the number of mismatches and short indels. Also runs MismatchCorrector, a post-processing tool that uses the BWA tool (comes with SPAdes). This option is recommended only for assembly of small genomes. We strongly recommend not to use it for large and medium-size eukaryotic genomes. Note that this option is not supported by metaSPAdes and rnaSPAdes.

    --continue
        Continues a SPAdes run from the specified output folder, starting from the last available check-point. Check-points are made after:

        • the error correction module is finished
        • the iteration for each specified K value of the assembly module is finished
        • mismatch correction is finished for contigs or scaffolds

    For example, if the specified K values are 21, 33 and 55 and SPAdes was stopped or crashed during the assembly stage with K = 55, you can run SPAdes with the --continue option specifying the same output directory. SPAdes will continue the run starting from the assembly stage with K = 55. The error correction module and the iterations for K equal to 21 and 33 will not be run again. If --continue is set, the only allowed option is -o <output_dir>.
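    Such a continued run therefore needs nothing besides the output directory of the interrupted run, e.g. (the directory name is a placeholder):

        spades.py --continue -o spades_output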

    --restart-from <check_point>
        Restarts a SPAdes run from the specified output folder, starting from the specified check-point. Check-points are:

        • ec – start from error correction
        • as – restart the assembly module from the first iteration
        • k<int> – restart from the iteration with the specified k value, e.g. k55 (not available in RNA-Seq mode)
        • mc – restart mismatch correction
        • last – restart from the last available check-point (similar to --continue)

    In contrast to the --continue option, you can change some of the options when using --restart-from. You can change any option except: all basic options, all options for specifying input data (including --dataset), the --only-error-correction option and the --only-assembler option. For example, if you ran the assembler with k values 21,33,55 without mismatch correction, you can add one more iteration with k=77 and run the mismatch correction step by running SPAdes with the following options:
    --restart-from k55 -k 21,33,55,77 --mismatch-correction -o <previous_output_dir>
    Since all files will be overwritten, do not forget to copy your assembly from the previous run if you need it.
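    Put together as a complete command line, that restart reads:

        spades.py --restart-from k55 -k 21,33,55,77 --mismatch-correction -o <previous_output_dir>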

    --disable-gzip-output
        Forces the read error correction module not to compress the corrected reads. If this option is not set, corrected reads will be in *.fastq.gz format.

    Input data

    Specifying a single library (paired-end or single-read)

    --12 <file_name>
        File with interlaced forward and reverse paired-end reads.

    -1 <file_name>
        File with forward reads.

    -2 <file_name>
        File with reverse reads.

    --merged <file_name>
        File with merged paired reads.
        If the properties of the library permit, overlapping paired-end reads can be merged using special software. Non-empty files with (remaining) unmerged left/right reads (separate or interlaced) must be provided for the same library for SPAdes to correctly detect the original read length.
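    For example, a single library with merged and leftover unmerged reads might be specified as follows (file names are placeholders):

        spades.py --merged lib1_merged.fastq \
        -1 lib1_unmerged_left.fastq -2 lib1_unmerged_right.fastq \
        -o spades_output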

    -s <file_name>
        File with unpaired reads.

    Specifying multiple libraries

    Single-read libraries

    --s<#> <file_name>
        File for single-read library number <#> (<#> = 1,2,..,9). For example, for the first single-read library the option is: --s1 <file_name>
        Do not use the -s option for single-read libraries, since it specifies unpaired reads for the first paired-end library.

    Paired-end libraries

    --pe<#>-12 <file_name>
        File with interlaced reads for paired-end library number <#> (<#> = 1,2,..,9). For example, for the first paired-end library the option is: --pe1-12 <file_name>

    --pe<#>-1 <file_name>
        File with left reads for paired-end library number <#> (<#> = 1,2,..,9).

    --pe<#>-2 <file_name>
        File with right reads for paired-end library number <#> (<#> = 1,2,..,9).

    --pe<#>-m <file_name>
        File with merged reads from paired-end library number <#> (<#> = 1,2,..,9).
        If the properties of the library permit, paired reads can be merged using special software. Non-empty files with (remaining) unmerged left/right reads (separate or interlaced) must be provided for the same library for SPAdes to correctly detect the original read length.

    --pe<#>-s <file_name>
        File with unpaired reads from paired-end library number <#> (<#> = 1,2,..,9).
        For example, paired reads can become unpaired during the error correction procedure.

    --pe<#>-<or>
        Orientation of reads for paired-end library number <#> (<#> = 1,2,..,9; <or> = "fr","rf","ff").
        The default orientation for paired-end libraries is forward-reverse (--> <--). For example, to specify reverse-forward orientation for the second paired-end library, you should use the flag: --pe2-rf
        Should not be confused with FR and RF strand-specificity for RNA-Seq data (see the rnaSPAdes manual).

    Mate-pair libraries

    --mp<#>-12 <file_name>
        File with interlaced reads for mate-pair library number <#> (<#> = 1,2,..,9).

    --mp<#>-1 <file_name>
        File with left reads for mate-pair library number <#> (<#> = 1,2,..,9).

    --mp<#>-2 <file_name>
        File with right reads for mate-pair library number <#> (<#> = 1,2,..,9).

    --mp<#>-<or>
        Orientation of reads for mate-pair library number <#> (<#> = 1,2,..,9; <or> = "fr","rf","ff").
        The default orientation for mate-pair libraries is reverse-forward (<-- -->). For example, to specify forward-forward orientation for the first mate-pair library, you should use the flag: --mp1-ff


    High-quality mate-pair libraries (can be used for mate-pair only assembly)

    --hqmp<#>-12 <file_name>
        File with interlaced reads for high-quality mate-pair library number <#> (<#> = 1,2,..,9).

    --hqmp<#>-1 <file_name>
        File with left reads for high-quality mate-pair library number <#> (<#> = 1,2,..,9).

    --hqmp<#>-2 <file_name>
        File with right reads for high-quality mate-pair library number <#> (<#> = 1,2,..,9).

    --hqmp<#>-s <file_name>
        File with unpaired reads from high-quality mate-pair library number <#> (<#> = 1,2,..,9).

    --hqmp<#>-<or>
        Orientation of reads for high-quality mate-pair library number <#> (<#> = 1,2,..,9; <or> = "fr","rf","ff").
        The default orientation for high-quality mate-pair libraries is forward-reverse (--> <--). For example, to specify reverse-forward orientation for the first high-quality mate-pair library, you should use the flag: --hqmp1-rf


    Lucigen NxSeq® Long Mate Pair libraries (see section 3.1 for details)

    --nxmate<#>-1 <file_name>
        File with left reads for Lucigen NxSeq® Long Mate Pair library number <#> (<#> = 1,2,..,9).

    --nxmate<#>-2 <file_name>
        File with right reads for Lucigen NxSeq® Long Mate Pair library number <#> (<#> = 1,2,..,9).

    Specifying data for hybrid assembly

    --pacbio <file_name>
        File with PacBio CLR reads. For PacBio CCS reads use the -s option. More information on PacBio reads is provided in section 3.1.

    --nanopore <file_name>
        File with Oxford Nanopore reads.

    --sanger <file_name>
        File with Sanger reads.

    --trusted-contigs <file_name>
        Reliable contigs of the same genome, which are likely to have no misassemblies and a small rate of other errors (e.g. mismatches and indels). This option is not intended for contigs of related species.

    --untrusted-contigs <file_name>
        Contigs of the same genome, the quality of which is average or unknown. Contigs of poor quality can be used but may introduce errors in the assembly. This option is also not intended for contigs of related species.


    Specifying input data with a YAML data set file (advanced)

    An alternative way to specify an input data set for SPAdes is to create a YAML data set file. By using a YAML file you can provide an unlimited number of paired-end, mate-pair and unpaired libraries. Basically, a YAML data set file is a text file, in which input libraries are provided as a comma-separated list in square brackets. Each library is provided in braces as a comma-separated list of attributes. The following attributes are available:

    • orientation ("fr", "rf", "ff")
    • type ("paired-end", "mate-pairs", "hq-mate-pairs", "single", "pacbio", "nanopore", "sanger", "trusted-contigs", "untrusted-contigs")
    • interlaced reads (comma-separated list of files with interlaced reads)
    • left reads (comma-separated list of files with left reads)
    • right reads (comma-separated list of files with right reads)
    • single reads (comma-separated list of files with single reads or unpaired reads from a paired library)
    • merged reads (comma-separated list of files with merged reads)

    To properly specify a library you should provide its type and at least one file with reads. Orientation is an optional attribute. Its default value is "fr" (forward-reverse) for paired-end libraries and "rf" (reverse-forward) for mate-pair libraries.


    The value for each attribute is given after a colon. Comma-separated lists of files should be given in square brackets. For each file you should provide its full path in double quotes. Make sure that files with right reads are given in the same order as corresponding files with left reads.

    For example, if you have one paired-end library split into two pairs of files:

        lib_pe1_left_1.fastq
        lib_pe1_right_1.fastq
        lib_pe1_left_2.fastq
        lib_pe1_right_2.fastq

    one mate-pair library:

        lib_mp1_left.fastq
        lib_mp1_right.fastq

    and PacBio CCS and CLR reads:

        pacbio_ccs.fastq
        pacbio_clr.fastq

    YAML file should look like this:

        [
          {
            orientation: "fr",
            type: "paired-end",
            right reads: [
              "/FULL_PATH_TO_DATASET/lib_pe1_right_1.fastq",
              "/FULL_PATH_TO_DATASET/lib_pe1_right_2.fastq"
            ],
            left reads: [
              "/FULL_PATH_TO_DATASET/lib_pe1_left_1.fastq",
              "/FULL_PATH_TO_DATASET/lib_pe1_left_2.fastq"
            ]
          },
          {
            orientation: "rf",
            type: "mate-pairs",
            right reads: [
              "/FULL_PATH_TO_DATASET/lib_mp1_right.fastq"
            ],
            left reads: [
              "/FULL_PATH_TO_DATASET/lib_mp1_left.fastq"
            ]
          },
          {
            type: "single",
            single reads: [
              "/FULL_PATH_TO_DATASET/pacbio_ccs.fastq"
            ]
          },
          {
            type: "pacbio",
            single reads: [
              "/FULL_PATH_TO_DATASET/pacbio_clr.fastq"
            ]
          }
        ]

    Once you have created a YAML file, save it with the .yaml extension (e.g. as my_data_set.yaml) and run SPAdes using the --dataset option:
    --dataset <your YAML file>
    Notes:

    • The --dataset option cannot be used with any other options for specifying input data.
    • We recommend nesting all files with long reads of the same data type in a single library block.
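    As a concrete sketch, using the YAML file name from the example above:

        spades.py --dataset my_data_set.yaml -o spades_output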


    Advanced options

    -t <int> (or --threads <int>)
        Number of threads. The default value is 16.

    -m <int> (or --memory <int>)
        Set the memory limit in Gb. SPAdes terminates if it reaches this limit. The default value is 250 Gb. The actual amount of consumed RAM will be below this limit. Make sure this value is correct for the given machine. SPAdes uses the limit value to automatically determine the sizes of various buffers, etc.
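    For example, on a machine with 8 cores and 64 Gb of RAM you might run (read file names are placeholders):

        spades.py -1 left.fastq -2 right.fastq -t 8 -m 64 -o spades_output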

    --tmp-dir <dir_name>
        Set the directory for temporary files from read error correction. The default value is <output_dir>/corrected/tmp

    -k <int,int,...>
        Comma-separated list of k-mer sizes to be used (all values must be odd, less than 128 and listed in ascending order). If --sc is set, the default values are 21,33,55. For multi-cell data sets K values are automatically selected using the maximum read length (see the note on assembling long Illumina paired reads for details). To properly select K values for IonTorrent data, read section 3.3.

    --cov-cutoff <float>
        Read coverage cutoff value. Must be a positive float value, or "auto", or "off". The default value is "off". When set to "auto" SPAdes automatically computes the coverage threshold using a conservative strategy. Note that this option is not supported by metaSPAdes.

    --phred-offset <33 or 64>
        PHRED quality offset for the input reads; can be either 33 or 64. It will be auto-detected if not specified.

    Examples


    To test the toy data set, you can also run the following command from the SPAdes bin directory:

        spades.py --pe1-1 ../share/spades/test_dataset/ecoli_1K_1.fq.gz \
        --pe1-2 ../share/spades/test_dataset/ecoli_1K_2.fq.gz -o spades_test

    If you have your library separated into several pairs of files, for example:

        lib1_forward_1.fastq
        lib1_reverse_1.fastq
        lib1_forward_2.fastq
        lib1_reverse_2.fastq

    make sure that corresponding files are given in the same order:

        spades.py --pe1-1 lib1_forward_1.fastq --pe1-2 lib1_reverse_1.fastq \
        --pe1-1 lib1_forward_2.fastq --pe1-2 lib1_reverse_2.fastq \
        -o spades_output

    Files with interlaced paired-end reads or files with unpaired reads can be specified in any order with one file per option, for example:

        spades.py --pe1-12 lib1_1.fastq --pe1-12 lib1_2.fastq \
        --pe1-s lib1_unpaired_1.fastq --pe1-s lib1_unpaired_2.fastq \
        -o spades_output

    If you have several paired-end and mate-pair reads, for example:

    paired-end library 1

        lib_pe1_left.fastq
        lib_pe1_right.fastq

    mate-pair library 1

        lib_mp1_left.fastq
        lib_mp1_right.fastq

    mate-pair library 2

        lib_mp2_left.fastq
        lib_mp2_right.fastq

    make sure that files corresponding to each library are grouped together:

        spades.py --pe1-1 lib_pe1_left.fastq --pe1-2 lib_pe1_right.fastq \
        --mp1-1 lib_mp1_left.fastq --mp1-2 lib_mp1_right.fastq \
        --mp2-1 lib_mp2_left.fastq --mp2-2 lib_mp2_right.fastq \
        -o spades_output

    If you have IonTorrent unpaired reads, PacBio CLR and additional reliable contigs:

        it_reads.fastq
        pacbio_clr.fastq
        contigs.fasta

    run SPAdes with the following command:

        spades.py --iontorrent -s it_reads.fastq \
        --pacbio pacbio_clr.fastq --trusted-contigs contigs.fasta \
        -o spades_output

    If a single-read library is split into several files:

        unpaired1_1.fastq
        unpaired1_2.fastq
        unpaired1_3.fastq

    specify them as one library:

        spades.py --s1 unpaired1_1.fastq \
        --s1 unpaired1_2.fastq --s1 unpaired1_3.fastq \
        -o spades_output

    All options for specifying input data can be mixed if needed, but make sure that files for each library are grouped and files with left and right paired reads are listed in the same order.


    Assembling IonTorrent reads


    Only FASTQ or BAM files are supported as input.


    The selection of k-mer length is non-trivial for IonTorrent. If the dataset is more or less conventional (good coverage, not high GC, etc.), then use our recommendation for long reads (e.g. assemble using k-mer lengths 21,33,55,77,99,127). However, due to the increased error rate some changes of k-mer lengths (e.g. selection of shorter ones) may be required. For example, if you ran SPAdes with k-mer lengths 21,33,55,77 and then decided to assemble the same data set using more iterations and larger values of K, you can run SPAdes once again specifying the same output folder and the following options: --restart-from k77 -k 21,33,55,77,99,127 --mismatch-correction -o <previous_output_dir>. Do not forget to copy contigs and scaffolds from the previous run. We are planning to tackle the issue of selecting k-mer lengths for IonTorrent reads in future versions.

    You may not need error correction at all for data produced with the Hi-Q enzyme. However, we suggest trying to assemble your data with and without error correction and selecting the best variant.

    For non-trivial datasets (e.g. with high GC, low or uneven coverage) we suggest enabling single-cell mode (setting the --sc option) and using k-mer lengths of 21,33,55.

    Assembling long Illumina paired reads (2x150 and 2x250)


    Recent advances in DNA sequencing technology have led to a rapid increase in read length. Nowadays, it is a common situation to have a data set consisting of 2x150 or 2x250 paired-end reads produced by Illumina MiSeq or HiSeq2500. However, the use of longer reads alone will not automatically improve assembly quality. An assembler that can properly take advantage of them is needed.

    SPAdes' use of iterative k-mer lengths allows it to benefit from the full potential of long paired-end reads. Currently the assembler options have to be set manually, but we plan to incorporate automatic calculation of the necessary options soon.


    Please note that in addition to the read length, the insert length also matters a lot. It is not recommended to sequence a 300bp fragment with a pair of 250bp reads. We suggest using 350-500 bp fragments with 2x150 reads and 550-700 bp fragments with 2x250 reads.

    Multi-cell data set with read length 2x150

    Do not turn off SPAdes error correction (BayesHammer module), which is included in the SPAdes default pipeline.

    If you have enough coverage (50x+), then you may want to try to set k-mer lengths of 21, 33, 55, 77 (selected by default for reads with length 150bp).

    Make sure you run the assembler with the --careful option to minimize the number of mismatches in the final contigs.

    We recommend that you check the SPAdes log file at the end of each iteration to control the average coverage of the contigs.

    For reads corrected prior to running the assembler:

        spades.py -k 21,33,55,77 --careful --only-assembler <your reads> -o spades_output

    To correct and assemble the reads:

        spades.py -k 21,33,55,77 --careful <your reads> -o spades_output

    Multi-cell data set with read lengths 2 x 250

    Do not turn off SPAdes error correction (BayesHammer module), which is included in the SPAdes default pipeline.

    By default we suggest increasing k-mer lengths in increments of 22 until the k-mer length reaches 127. The exact length of the k-mer depends on the coverage: a k-mer length of 127 corresponds to 50x k-mer coverage and higher. For read length 250bp SPAdes automatically chooses K values equal to 21, 33, 55, 77, 99, 127.

    Make sure you run the assembler with the --careful option to minimize the number of mismatches in the final contigs.

    We recommend that you check the SPAdes log file at the end of each iteration to control the average coverage of the contigs.

    For reads corrected prior to running the assembler:

        spades.py -k 21,33,55,77,99,127 --careful --only-assembler <your reads> -o spades_output

    To correct and assemble the reads:

        spades.py -k 21,33,55,77,99,127 --careful <your reads> -o spades_output

    Single-cell data set with read lengths 2 x 150 or 2 x 250


    The default k-mer lengths are recommended. For single-cell data sets SPAdes selects k-mer sizes 21, 33 and 55.

    However, it might be tricky to fully utilize the advantages of the long reads you have. Consider contacting us for more information and to discuss the assembly strategy.

    SPAdes output

    SPAdes stores all output files in <output_dir>, which is set by the user.

    • <output_dir>/corrected/ directory contains reads corrected by BayesHammer in *.fastq.gz files; if compression is disabled, reads are stored in uncompressed *.fastq files
    • <output_dir>/scaffolds.fasta contains resulting scaffolds (recommended for use as resulting sequences)
    • <output_dir>/contigs.fasta contains resulting contigs
    • <output_dir>/assembly_graph.gfa contains SPAdes assembly graph and scaffolds paths in GFA 1.0 format
    • <output_dir>/assembly_graph.fastg contains SPAdes assembly graph in FASTG format
    • <output_dir>/contigs.paths contains paths in the assembly graph corresponding to contigs.fasta (see details below)
    • <output_dir>/scaffolds.paths contains paths in the assembly graph corresponding to scaffolds.fasta (see details below)

    Contig/scaffold names in SPAdes output FASTA files have the following format:
    >NODE_3_length_237403_cov_243.207
    Here 3 is the number of the contig/scaffold, 237403 is the sequence length in nucleotides and 243.207 is the k-mer coverage for the last (largest) k value used. Note that the k-mer coverage is always lower than the read (per-base) coverage.

    In general, SPAdes uses two techniques for joining contigs into scaffolds. The first one relies on read pairs and tries to estimate the size of the gap separating contigs. The second one relies on the assembly graph: e.g. if two contigs are separated by a complex tandem repeat that cannot be resolved exactly, the contigs are joined into a scaffold with a fixed gap size of 100 bp. Contigs produced by SPAdes do not contain N symbols.

    To view FASTG and GFA files we recommend using the Bandage visualization tool. Note that sequences stored in assembly_graph.fastg correspond to contigs before repeat resolution (edges of the assembly graph). Paths corresponding to contigs after repeat resolution (scaffolding) are stored in contigs.paths (scaffolds.paths) in the format accepted by Bandage (see the Bandage wiki for details). An example is given below.


    Let the contig with the name NODE_5_length_100000_cov_215.651 consist of the following edges of the assembly graph:

        >EDGE_2_length_33280_cov_199.702
        >EDGE_5_length_84_cov_321.414'
        >EDGE_3_length_111_cov_175.304
        >EDGE_5_length_84_cov_321.414'
        >EDGE_4_length_66661_cov_223.548

    Then, contigs.paths will contain the following record:

        NODE_5_length_100000_cov_215.651
        2+,5-,3+,5-,4+

    Since the current version of Bandage does not accept paths with gaps, paths corresponding to contigs/scaffolds jumping over a gap in the assembly graph are split by a semicolon at the gap positions. For example, the following record

        NODE_3_length_237403_cov_243.207
        21-,17-,15+,17-,16+;
        31+,23-,22+,23-,4-

    states that NODE_3_length_237403_cov_243.207 corresponds to the path with 10 edges, but jumps over a gap between edges EDGE_16_length_21503_cov_482.709 and EDGE_31_length_140767_cov_220.239.


    The full list of <output_dir> content is presented below:

    • scaffolds.fasta – resulting scaffolds (recommended for use as resulting sequences)
    • contigs.fasta – resulting contigs
    • assembly_graph.fastg – assembly graph
    • contigs.paths – contigs paths in the assembly graph
    • scaffolds.paths – scaffolds paths in the assembly graph
    • before_rr.fasta – contigs before repeat resolution
    • corrected/ – files from read error correction
        • configs/ – configuration files for read error correction
        • corrected.yaml – internal configuration file
        • output files with corrected reads
    • params.txt – information about SPAdes parameters in this run
    • spades.log – SPAdes log
    • dataset.info – internal configuration file
    • input_dataset.yaml – internal YAML data set file
    • K<##>/ – directory containing intermediate files from the run with K=<##>. These files should not be used as assembly results; use the resulting contigs/scaffolds in the files mentioned above.

    SPAdes will overwrite these files and directories if they exist in the specified <output_dir>.


    plasmidSPAdes output

    plasmidSPAdes outputs only DNA sequences from putative plasmids. Output file names and formats remain the same as in SPAdes (see previous section), with the following difference: for all contig names in contigs.fasta, scaffolds.fasta and assembly_graph.fastg we append the suffix _component_X, where X is the id of the putative plasmid that the contig belongs to. Note that plasmidSPAdes may not be able to separate similar plasmids, and thus their contigs may appear with the same id.

    biosyntheticSPAdes output

    biosyntheticSPAdes outputs three files of interest:

    • gene_clusters.fasta – contains DNA sequences from putative biosynthetic gene clusters (BGC). Since each sample may contain multiple BGCs and biosyntheticSPAdes can output several putative DNA sequences for each cluster, for each contig name we append the suffix _cluster_X_candidate_Y, where X is the id of the BGC and Y is the id of the candidate from the BGC.
    • bgc_statistics.txt – contains statistics about BGC composition in the sample. First, it outputs the number of domain hits in the sample. Then, for each BGC candidate we output the domain order with positions on the corresponding DNA sequence from gene_clusters.fasta.
    • domain_graph.dot – contains the domain graph structure, which can be used to assess the complexity of the sample and the structure of BGCs. For more information about domain graph construction, please refer to the paper.


    Assembly evaluation

    QUAST may be used to generate summary statistics (N50, maximum contig length, GC %, # genes found in a reference list or with built-in gene finding tools, etc.) for a single assembly. It may also be used to compare statistics for multiple assemblies of the same data set (e.g., SPAdes run with different parameters, or several different assemblers).

    Stand-alone binaries released within SPAdes package


    k-mer counting

    To provide input data to the SPAdes k-mer counting tool spades-kmercounter you may just specify files in SPAdes-supported formats without any flags (after all options) or provide a dataset description file in YAML format.

    Output: <output_dir>/final_kmers – unordered set of k-mers in binary format. K-mers from both forward and reverse-complementary reads are taken into account.

    Output format: All k-mers are written sequentially without any separators. Each k-mer takes the same number of bits. One k-mer of length K takes 2*K bits. K-mers are aligned by 64 bits. For example, one k-mer with length=21 takes 8 bytes, with length=33 takes 16 bytes, and with length=55 takes 16 bytes. Each nucleotide is coded with 2 bits: 00 - A, 01 - C, 10 - G, 11 - T.

    Example:

        For k-mer: AGCTCT
        Memory: 6 nucleotides * 2 bits = 12 bits, aligned to 64 bits (8 bytes)
        Let's describe the bytes:
        data[0] = AGCT -> 11 01 10 00 -> 0xd8
        data[1] = CT00 -> 00 00 11 01 -> 0x0d
        data[2] = 0000 -> 00 00 00 00 -> 0x00
        data[3] = 0000 -> 00 00 00 00 -> 0x00
        data[4] = 0000 -> 00 00 00 00 -> 0x00
        data[5] = 0000 -> 00 00 00 00 -> 0x00
        data[6] = 0000 -> 00 00 00 00 -> 0x00
        data[7] = 0000 -> 00 00 00 00 -> 0x00

    Synopsis: spades-kmercount [OPTION...] <input files>


    The options are:

    -d, --dataset file <file name>
        dataset description (in YAML format), input files ignored

    -k, --kmer <int>
        k-mer length (default: 21)

    -t, --threads <int>
        number of threads to use (default: number of CPUs)

    -w, --workdir <dir name>
        working directory to use (default: current directory)

    -b, --bufsize <int>
        sorting buffer size in bytes, per thread (default 536870912)

    -h, --help
        print help message
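    As an illustrative sketch (read file names and the working directory are placeholders):

        spades-kmercount -k 21 -t 8 -w tmp_workdir reads_1.fastq reads_2.fastq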


    k-mer coverage read filter

    spades-read-filter is a tool for filtering out reads with median k-mer coverage less than a given threshold.

    To provide input data to the SPAdes k-mer read filter tool spades-read-filter you should provide a dataset description file in YAML format.


    Synopsis: spades-read-filter [OPTION...] -d <yaml>


    The options are:

    -d, --dataset file <file name>
        dataset description (in YAML format)

    -k, --kmer <int>
        k-mer length (default: 21)

    -t, --threads <int>
        number of threads to use (default: number of CPUs)

    -o, --outdir <dir>
        output directory to use (default: current directory)

    -c, --cov <value>
        median k-mer count threshold (read pairs for which the k-mer count median for BOTH reads is LESS OR EQUAL to this value will be ignored)

    -h, --help
        print help message
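    For instance, to drop read pairs whose median k-mer count is at most 2 (the YAML file name is a placeholder):

        spades-read-filter -d my_data_set.yaml -k 21 -c 2 -o filtered_reads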


    k-mer cardinality estimating

    spades-kmer-estimating is a tool for estimating the approximate number of unique k-mers in the provided reads. K-mers from reverse-complementary reads aren't taken into account for k-mer cardinality estimating.

    To provide input data to the SPAdes k-mer cardinality estimating tool spades-kmer-estimating you should provide a dataset description file in YAML format.


    Synopsis: spades-kmer-estimating [OPTION...] -d <yaml>


    The options are:

    -d, --dataset file <file name>
        dataset description (in YAML format)

    -k, --kmer <int>
        k-mer length (default: 21)

    -t, --threads <int>
        number of threads to use (default: number of CPUs)

    -h, --help
        print help message
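    An illustrative invocation (the YAML file name is a placeholder):

        spades-kmer-estimating -d my_data_set.yaml -k 21 -t 8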


    Graph construction

    The graph construction tool spades-gbuilder has two mandatory options: a dataset description file in YAML format and an output file name.


    Synopsis: spades-gbuilder <dataset description (in YAML)> <output filename> [-k <value>] [-t <value>] [-tmpdir <dir>] [-b <value>] [-unitigs|-fastg|-gfa|-spades]


    Additional options are:

    -k <int>
        k-mer length used for construction (must be odd)

    -t <int>
        number of threads

    -tmp-dir <dir_name>
        scratch directory to use

    -b <int>
        sorting buffer size (per thread, in bytes)

    -unitigs
        output unitigs in FASTA format

    -fastg
        output graph in FASTG format

    -gfa
        output graph in GFA1 format

    -spades
        output graph in SPAdes internal format
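    For example, to build a graph with k=55 and write it in GFA1 format (file names are placeholders):

        spades-gbuilder my_data_set.yaml assembly_graph.gfa -k 55 -t 8 -gfa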


    Long read to graph alignment


    hybridSPAdes aligner

    The tool spades-gmapper gives the opportunity to extract long read alignments generated with the hybridSPAdes pipeline options. It has three mandatory options: a dataset description file in YAML format, a graph file in GFA format and an output file name.


    Synopsis: spades-gmapper <dataset description (in YAML)> <graph (in GFA)> <output filename> [-k <value>] [-t <value>] [-tmpdir <dir>]


    Additional options are:

    -k <int>
        k-mer length that was used for graph construction

    -t <int>
        number of threads

    -tmpdir <dir_name>
        scratch directory to use
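    An illustrative invocation, assuming the graph was built with k=55 (file names are placeholders):

        spades-gmapper my_data_set.yaml assembly_graph.gfa alignments.out -k 55 -t 8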


    While spades-gmapper is a solution for those who work on hybridSPAdes assembly and want to get intermediate results, SPAligner is an end-product application for sequence-to-graph alignment with tunable parameters and output types.


    SPAligner

    SPAligner is a tool for fast and accurate alignment of nucleotide sequences to assembly graphs. It takes a file with sequences (in FASTA/FASTQ format) and an assembly in GFA format, and outputs long read to graph alignments in various formats (such as TSV, FASTA and GPA).


    Synopsis: spaligner assembly/src/projects/spaligner_config.yaml -d <value> -s <value> -g <value> -k <value> [-t <value>] [-o <value>]


    Parameters are:

    -d <type>
        long reads type: nanopore, pacbio

    -s <filename>
        file with sequences (in fasta/fastq)

    -g <filename>
        file with graph (in GFA)

    -k <int>
        k-mer length that was used for graph construction

    -t <int>
        number of threads (default: 8)

    -o, --outdir <dir>
        output directory to use (default: spaligner_result/)
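    As an illustrative sketch following the synopsis above (sequence and graph file names are placeholders):

        spaligner assembly/src/projects/spaligner_config.yaml -d nanopore \
        -s long_reads.fastq -g assembly_graph.gfa -k 55 -o spaligner_out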

    For more information on parameters and options please refer to the main SPAligner manual (assembler/src/projects/spaligner/README.md).

    Also, if you want to align protein sequences, please refer to our pre-release version.


    Citation


    If you use SPAdes in your research, please include Nurk, Bankevich et al., 2013 in your reference list. You may also add Bankevich, Nurk et al., 2012 instead.

    In case you perform hybrid assembly using PacBio or Nanopore reads, you may also cite Antipov et al., 2015.


    If you use multiple paired-end and/or mate-pair libraries you may also cite papers describing SPAdes repeat resolution algorithms Prjibelski et al., 2014 and Vasilinetc et al., 2015.

    If you use metaSPAdes please cite Nurk et al., 2017.

    If you use plasmidSPAdes please cite Antipov et al., 2016.


    For rnaSPAdes citation use Bushmanova et al., 2019.


    If you use biosyntheticSPAdes please cite Meleshko et al., 2019.


    In addition, we would like to list your publications that use our software on our website. Please email the reference, the name of your lab, department and institution to spades.support@cab.spbu.ru.

    Feedback and bug reports

    Your comments, bug reports, and suggestions are very welcome. They will help us to further improve SPAdes. If you have any trouble running SPAdes, please send us params.txt and spades.log from the directory <output_dir>.

    You can leave your comments and bug reports at our GitHub repository tracker or send them via e-mail: spades.support@cab.spbu.ru.






    \ No newline at end of file diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/__init__.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/__init__.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/__init__.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/__init__.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/composer.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/composer.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/composer.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/composer.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/constructor.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/constructor.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/constructor.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/constructor.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/cyaml.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/cyaml.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/cyaml.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/cyaml.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/dumper.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/dumper.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/dumper.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/dumper.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/emitter.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/emitter.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/emitter.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/emitter.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/error.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/error.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/error.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/error.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/events.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/events.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/events.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/events.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/loader.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/loader.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/loader.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/loader.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/nodes.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/nodes.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/nodes.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/nodes.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/parser.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/parser.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/parser.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/parser.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/reader.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/reader.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/reader.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/reader.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/representer.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/representer.py similarity 
index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/representer.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/representer.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/resolver.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/resolver.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/resolver.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/resolver.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/scanner.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/scanner.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/scanner.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/scanner.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/serializer.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/serializer.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/serializer.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/serializer.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/tokens.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/tokens.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml2/tokens.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml2/tokens.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/__init__.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/__init__.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/__init__.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/__init__.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/composer.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/composer.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/composer.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/composer.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/constructor.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/constructor.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/constructor.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/constructor.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/cyaml.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/cyaml.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/cyaml.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/cyaml.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/dumper.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/dumper.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/dumper.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/dumper.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/emitter.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/emitter.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/emitter.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/emitter.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/error.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/error.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/error.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/error.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/events.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/events.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/events.py rename to 
src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/events.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/loader.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/loader.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/loader.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/loader.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/nodes.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/nodes.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/nodes.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/nodes.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/parser.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/parser.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/parser.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/parser.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/reader.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/reader.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/reader.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/reader.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/representer.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/representer.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/representer.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/representer.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/resolver.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/resolver.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/resolver.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/resolver.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/scanner.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/scanner.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/scanner.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/scanner.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/serializer.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/serializer.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/serializer.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/serializer.py diff --git a/src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/tokens.py b/src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/tokens.py similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/pyyaml3/tokens.py rename to src/SPAdes-3.14.0-Linux/share/spades/pyyaml3/tokens.py diff --git a/src/SPAdes-3.14.0-Linux/share/spades/rnaspades_manual.html b/src/SPAdes-3.14.0-Linux/share/spades/rnaspades_manual.html new file mode 100644 index 0000000..7149158 --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/rnaspades_manual.html @@ -0,0 +1,132 @@ + + + rnaSPAdes manual + + + +

    rnaSPAdes manual

    1. About rnaSPAdes
    2. rnaSPAdes specifics
        2.1. Running rnaSPAdes
        2.2. RNA-specific options
        2.3. rnaSPAdes output
    3. Assembly evaluation
    4. Citation
    5. Feedback and bug reports

    1 About rnaSPAdes

    rnaSPAdes is a tool for de novo transcriptome assembly from RNA-Seq data and is suitable for all kinds of organisms. rnaSPAdes has been a part of the SPAdes package since version 3.9. Information about SPAdes download, requirements, installation and basic options can be found in the SPAdes manual. Below you can find information about the differences between SPAdes and rnaSPAdes.

    2 rnaSPAdes specifics


    2.1 Running rnaSPAdes

    To run rnaSPAdes use

        rnaspades.py [options] -o <output_dir>

    or

        spades.py --rna [options] -o <output_dir>

    Note that we assume that the SPAdes installation directory is added to the PATH variable (provide the full path to the rnaSPAdes executable otherwise: <rnaspades installation dir>/rnaspades.py).

    Here are several notes regarding rnaSPAdes options:

    • rnaSPAdes takes as input at least one paired-end or single-end library. For hybrid assembly you can use PacBio or Oxford Nanopore reads.
    • rnaSPAdes does not support --careful and --cov-cutoff options.
    • rnaSPAdes is not compatible with other pipeline options such as --meta, --sc and --plasmid. If you wish to assemble metatranscriptomic data, just run rnaSPAdes as it is.
    • By default rnaSPAdes uses 2 k-mer sizes, which are automatically detected using the read length (approximately one third and half of the maximal read length). We recommend not to change this parameter, because smaller k-mer sizes typically result in multiple chimeric (misassembled) transcripts. In case you have any doubts about your run, do not hesitate to contact us using the e-mail given below.
    • Although rnaSPAdes supports IonTorrent reads, it was not sufficiently tested on this kind of data.

    2.2 RNA-specific options

    Assembling strand-specific data

    rnaSPAdes supports strand-specific RNA-Seq datasets. You can set the strand-specific type using the following option:

    --ss <type>
        Use <type> = rf when the first read in a pair corresponds to the reverse gene strand (antisense data, e.g. obtained via the dUTP protocol) and <type> = fr otherwise (forward). The older deprecated syntax is --ss-rf and --ss-fr.

    Note that strand-specificity is not related to, and should not be confused with, the FR and RF orientation of paired reads. RNA-Seq paired-end reads typically have forward-reverse orientation (--> <--), which is assumed by default; no additional options are needed (see the main manual for details).

    If the data set is single-end, use the --ss rf option when reads are antisense and --ss fr otherwise.
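    For example, a paired-end dUTP (antisense) data set might be assembled with (read file names are placeholders):

        spades.py --rna --ss rf -1 left.fastq -2 right.fastq -o rnaspades_output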

    Hybrid transcriptome assembly

    rnaSPAdes now supports the conventional --pacbio and --nanopore options (see the SPAdes manual). Moreover, in addition to long reads you may also provide a separate file with reads capturing entire transcript sequences, using the following option. Full-length transcripts in such reads can typically be detected using the adapters. Note that FL reads should be trimmed so that the adapters are excluded.

    --fl-rna <file_name>
        File with PacBio/Nanopore reads or contigs that capture full-length transcripts.
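    A hybrid run with such full-length reads might look like this (file names are placeholders; the FL reads are assumed to be adapter-trimmed):

        rnaspades.py -1 left.fastq -2 right.fastq --fl-rna fl_reads.fastq -o rnaspades_output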


    2.3 rnaSPAdes output

    rnaSPAdes outputs one main FASTA file named transcripts.fasta. The corresponding file with paths in the assembly_graph.fastg is transcripts.paths.

    In addition, rnaSPAdes outputs transcripts with different levels of filtration into <output_dir>/:

    • hard_filtered_transcripts.fasta – includes only long and reliable transcripts with rather high expression.
    • soft_filtered_transcripts.fasta – includes short and low-expressed transcripts, likely to contain junk sequences.

    We recommend using the main transcripts.fasta file in case you don't have any specific needs for your project. Do not hesitate to contact us using the e-mail given below.

    Contig/scaffold names in rnaSPAdes output FASTA files have the following format:
    >NODE_97_length_6237_cov_11.9819_g8_i2
    Similarly to SPAdes, 97 is the number of the transcript, 6237 is its sequence length in nucleotides and 11.9819 is the k-mer coverage. Note that the k-mer coverage is always lower than the read (per-base) coverage. g8_i2 corresponds to gene number 8 and isoform number 2 within this gene. Transcripts with the same gene number are presumably produced from the same or somewhat similar (e.g. paralogous) genes. Note that the prediction is based on the presence of shared sequences in the transcripts and is very approximate.

    3 Assembly evaluation

    rnaQUAST may be used for transcriptome assembly quality assessment for model organisms when a reference genome and gene database are available. rnaQUAST also includes the BUSCO and GeneMarkS-T tools for de novo evaluation.

    4 Citation

    If you use rnaSPAdes in your research, please include Bushmanova et al., 2019 in your reference list.

    5 Feedback and bug reports

    Your comments, bug reports, and suggestions are very welcome. They will help us to further improve rnaSPAdes. If you have any trouble running rnaSPAdes, please send us params.txt and spades.log from the directory <output_dir>.

    You can leave your comments and bug reports at our GitHub repository tracker or send them via e-mail: spades.support@cab.spbu.ru.




    + + + diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/commands_parser.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/commands_parser.py new file mode 100644 index 0000000..5ef26a1 --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/commands_parser.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python + +############################################################################ +# Copyright (c) 2019 Saint Petersburg State University +# All Rights Reserved +# See file LICENSE for details. +############################################################################ + +import random +import string + +import support +import sys + + +class Command(object): + def __init__(self, STAGE, path, args, short_name, config_dir="", + del_after=None, output_files=None): + self.STAGE = STAGE + self.path = path + self.args = args + self.short_name = short_name + self.config_dir = config_dir + self.del_after = del_after + if self.del_after is None: + self.del_after = [] + self.output_files = output_files + if self.output_files is None: + self.output_files = [] + + def to_list(self): + return [self.path] + self.args + + def __str__(self): + return ' '.join(self.to_list()) + + def run(self, log): + support.sys_call(self.to_list(), log) + + def to_dict(self): + return {"STAGE": self.STAGE, + "path": self.path, + "args": self.args, + "short_name": self.short_name, + "config_dir": self.config_dir, + "output_files": self.output_files, + "del_after": self.del_after} + + +def write_commands_to_sh(commands, output_file): + with open(output_file, 'w') as fw: + fw.write("set -e\n") + for command in commands: + fw.write(command.__str__() + "\n") + + +def write_commands_to_yaml(commands, output_file): + if sys.version.startswith("2."): + import pyyaml2 as yaml + elif sys.version.startswith("3."): + import pyyaml3 as yaml + + data = [command.to_dict() for command in commands] + + with open(output_file, 'w') as f: + yaml.dump(data, f, default_flow_style=False) + + +def read_commands_from_yaml(yaml_fpath): + if sys.version.startswith("2."): + import pyyaml2 as yaml + elif sys.version.startswith("3."): + import pyyaml3 as yaml + + with open(yaml_fpath) as stream: + data = yaml.load(stream) + commands = [] + for kwargs in data: + commands.append(Command(**kwargs)) + return commands diff --git a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/common/SeqIO.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/common/SeqIO.py similarity index 81% rename from src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/common/SeqIO.py rename to src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/common/SeqIO.py index 9d5b2b9..4b7f2a8 100644 --- a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/common/SeqIO.py +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/common/SeqIO.py @@ -4,14 +4,20 @@ # See file LICENSE for details. 
############################################################################ -import itertools +import codecs +import gzip import sys +fasta_ext = ['.fa', '.fas', '.fasta', '.seq', '.fsa', '.fna', '.ffn', '.frn'] +fastq_ext = ['.fq', 'fastq'] + + def Open(f, mode): if f.endswith(".gz"): - return gzip.open(f, mode) + return codecs.getreader('UTF-8')(gzip.open(f, mode)) else: - return open(f, mode) + return codecs.open(f, mode, encoding='utf-8') + class Reader: def __init__(self, handler): @@ -52,17 +58,15 @@ def ReadUntillFill(self, buf_size): result.append(self.Top().strip()) cnt += len(self.Top().strip()) self.TrashCash() - assert(cnt == buf_size) + assert (cnt == buf_size) return "".join(result) - - def EOF(self): return self.Top() == "" class SeqRecord: - def __init__(self, seq, id, qual = None): + def __init__(self, seq, id, qual=None): if qual != None and len(qual) != len(seq): sys.stdout.write("oppa" + id + "oppa") assert qual == None or len(qual) == len(seq) @@ -83,10 +87,11 @@ def QualSubseq(self, l, r): def subseq(self, l, r): if l != 0 or r != len(self.seq): - return SeqRecord(self.seq[l:r], self.id + "(" + str(l + 1) +"-" + str(r) + ")", self.QualSubseq(l, r)) + return SeqRecord(self.seq[l:r], self.id + "(" + str(l + 1) + "-" + str(r) + ")", self.QualSubseq(l, r)) else: return self + def parse(handler, file_type): assert file_type in ["fasta", "fastq"] if file_type == "fasta": @@ -94,31 +99,35 @@ def parse(handler, file_type): if file_type == "fastq": return parse_fastq(handler) + def parse_fasta(handler): reader = Reader(handler) while not reader.EOF(): rec_id = reader.readline().strip() - assert(rec_id[0] == '>') + assert (rec_id[0] == '>') rec_seq = reader.ReadUntill(lambda s: s.startswith(">")) yield SeqRecord(rec_seq, rec_id[1:]) + def parse_fastq(handler): reader = Reader(handler) while not reader.EOF(): rec_id = reader.readline().strip() - assert(rec_id[0] == '@') + assert (rec_id[0] == '@') rec_seq = reader.ReadUntill(lambda s: s.startswith("+")) tmp = reader.readline() - assert(tmp[0] == '+') + assert (tmp[0] == '+') rec_qual = reader.ReadUntillFill(len(rec_seq)) yield SeqRecord(rec_seq, rec_id[1:], rec_qual) + def parse(handler, file_type): if file_type == "fasta": return parse_fasta(handler) elif file_type == "fastq": return parse_fastq(handler) + def write(rec, handler, file_type): if file_type == "fasta": handler.write(">" + rec.id + "\n") @@ -135,6 +144,7 @@ def FilterContigs(input_handler, output_handler, f, file_type): if f(contig): write(contig, output_handler, file_type) + def RemoveNs(input_handler, output_handler): for contig in parse(input_handler, "fasta"): l = 0 @@ -145,3 +155,28 @@ def RemoveNs(input_handler, output_handler): r -= 1 if r > l: write(SeqRecord(contig.seq[l:r], contig.id)) + + +def is_fasta(file_name): + for ext in fasta_ext: + if ext in file_name: + return True + + return False + + +def is_fastq(file_name): + for ext in fastq_ext: + if ext in file_name: + return True + + return False + + +def get_read_file_type(file_name): + if is_fastq(file_name): + return 'fastq' + elif is_fasta(file_name): + return 'fasta' + else: + return None diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/common/__init__.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/common/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/common/alignment.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/common/alignment.py similarity index 90% rename from 
src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/common/alignment.py rename to src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/common/alignment.py index d793844..129997c 100644 --- a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/common/alignment.py +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/common/alignment.py @@ -4,11 +4,13 @@ # See file LICENSE for details. ############################################################################ -import support import os import shutil -def align_bwa_pe_lib(command, index, reads_file1, reads_file2, work_dir, log, threads = 1): +import support + + +def align_bwa_pe_lib(command, index, reads_file1, reads_file2, work_dir, log, threads=1): log.info("Aligning paired-end library") log.info("Left reads: " + reads_file1) log.info("Right reads: " + reads_file2) @@ -21,12 +23,13 @@ def align_bwa_pe_lib(command, index, reads_file1, reads_file2, work_dir, log, th result = os.path.join(work_dir, "alignment.sam") log.info("Starting alignment of reads using bwa. See detailed log in " + log_file) log.info("Starting read alignment. See detailed log in " + log_file) - support.universal_sys_call([command, "mem", "-t", str(threads), "-S", "-M", index, reads_file1, reads_file2], log, result, err_log_file) + support.universal_sys_call([command, "mem", "-t", str(threads), "-S", "-M", index, reads_file1, reads_file2], log, + result, err_log_file) log.info("Done. See result in " + result) return result -def index_bwa(command, log, reference, work_dir, algorithm = "is"): +def index_bwa(command, log, reference, work_dir, algorithm="is"): if os.path.exists(work_dir): shutil.rmtree(work_dir) os.makedirs(work_dir) @@ -53,8 +56,7 @@ def align_bwa_pe_libs(command, index, reads, work_dir, log, threads): return result - -def align_bwa(command, reference, dataset, work_dir, log = None, threads = 1): +def align_bwa(command, reference, dataset, work_dir, log=None, threads=1): if log == None: log = logging.getLogger('') if os.path.exists(work_dir): diff --git a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/common/parallel_launcher.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/common/parallel_launcher.py similarity index 90% rename from src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/common/parallel_launcher.py rename to src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/common/parallel_launcher.py index df2f0a0..798f8a2 100644 --- a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/common/parallel_launcher.py +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/common/parallel_launcher.py @@ -4,19 +4,15 @@ # See file LICENSE for details. 
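For reference, `align_bwa_pe_lib` above boils down to one `bwa mem` invocation whose stdout is captured as a SAM file; `support.universal_sys_call` handles the stream redirection. An equivalent stand-alone sketch (not part of the patch) using only the standard library — it assumes a `bwa` binary on `$PATH` and an index already built with `bwa index`; all file names are illustrative:

```python
import subprocess

def align_pe_sketch(index, reads1, reads2, out_sam, threads=1):
    # Mirrors the command built in align_bwa_pe_lib: bwa mem -t N -S -M ...
    # stdout carries the SAM records; stderr carries bwa's progress log.
    with open(out_sam, "w") as sam, open(out_sam + ".err", "w") as err:
        subprocess.check_call(
            ["bwa", "mem", "-t", str(threads), "-S", "-M",
             index, reads1, reads2],
            stdout=sam, stderr=err)
    return out_sam

# align_pe_sketch("ref_index", "reads_1.fastq", "reads_2.fastq",
#                 "alignment.sam", threads=4)
```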
############################################################################ +import logging import multiprocessing -import os -import shlex -import subprocess import sys -import signal -import time import traceback -import logging __author__ = 'anton' import subprocess + class FunctionTask: def __init__(self, f): self.f = f @@ -29,12 +25,15 @@ def __call__(self, barcode): return 1 return 0 + class PseudoLambda: def __init__(self): pass + def __call__(self, task): task.run() + def GetHandlers(output_file_pattern, err_file_pattern, bid): if output_file_pattern == "": output_file_pattern = "/dev/null" @@ -44,8 +43,9 @@ def GetHandlers(output_file_pattern, err_file_pattern, bid): else: return (output, open(err_file_pattern.format(bid), "a")) + class ExternalCallTask: - def __init__(self, output_pattern = "", err_pattern = "", log_name = None): + def __init__(self, output_pattern="", err_pattern="", log_name=None): self.output_pattern = output_pattern self.err_pattern = err_pattern self.log_name = log_name @@ -56,7 +56,7 @@ def __call__(self, data): if self.log_name is not None: logging.getLogger(self.log_name).info("Starting: " + str(bid)) import shlex - return_code = subprocess.call(shlex.split(command), stdout = output, stderr = err) + return_code = subprocess.call(shlex.split(command), stdout=output, stderr=err) if return_code == 0: logging.getLogger(self.log_name).info("Successfully finished: " + str(bid)) else: @@ -64,14 +64,14 @@ def __call__(self, data): return return_code - def run_in_parallel(task, material, threads): result = call_in_parallel(task, material, threads) errors = len(material) - result.count(0) return errors + def call_in_parallel(task, material, threads): pool = multiprocessing.Pool(threads) result = pool.map_async(task, material).get(1000000000) -# result = pool.map(call, commands) + # result = pool.map(call, commands) return result diff --git a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/common/sam_parser.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/common/sam_parser.py similarity index 86% rename from src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/common/sam_parser.py rename to src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/common/sam_parser.py index f247c2d..10f1550 100644 --- a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/common/sam_parser.py +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/common/sam_parser.py @@ -7,9 +7,11 @@ import os import sys + def StringStartsFromPrefix(string, prefix): return string[:len(prefix)] == prefix + def CIGAR_to_List(cigar): delims = ["M", "I", "D", "N", "S", "H", "P", "=", "X"] cigar_list = list() @@ -24,6 +26,7 @@ def CIGAR_to_List(cigar): cur_num += s return [cigar_list, num_list] + def UpdateAlignmentLength(align_len, cigar_char, cigar_len, seq_len): if cigar_char == "M" or cigar_char == "D" or cigar_char == "N": align_len += cigar_len @@ -31,6 +34,7 @@ def UpdateAlignmentLength(align_len, cigar_char, cigar_len, seq_len): align_len = seq_len return align_len + ############################# SAM Config class ############################# class SAM_Config: @@ -61,20 +65,21 @@ class SAM_Config: seq_index = 9 qual_index = 10 + ############################# SAM Entry class ############################# class SAM_entry: - query_name = "" # string - flag = 0 # int - target_name = "" # string - pos = 0 # int - mapping_qiality = 0 # int - cigar = "" # string - mate_target_name = "" # string - mate_pos = 0 # int - tlen = 0 # int - seq = "" # string - qual = "" # string + query_name = "" 
# string + flag = 0 # int + target_name = "" # string + pos = 0 # int + mapping_qiality = 0 # int + cigar = "" # string + mate_target_name = "" # string + mate_pos = 0 # int + tlen = 0 # int + seq = "" # string + qual = "" # string alen = 0 sam_config = SAM_Config() @@ -107,9 +112,10 @@ def __init__(self, alignment_string): self.ComputeAlignmentLength() - #if self.cigar != "101M": + # if self.cigar != "101M": # self.Print() + ############################# SAM Parser class ############################# class SAMEntryInfo: @@ -127,9 +133,11 @@ def __init__(self, tid, tname, pos, alen, seq, flag, name, qual, cigar): self.qual = qual self.cigar = cigar - def Print(self): - sys.stdout.write(self.name + " " + str(self.tid) + " " + str(self.pos) + " " + str(self.alen) + " " + str(self.is_unmapped) + " " + str(self.proper_alignment) + " " + str(self.flag) + " " + str(self.secondary) + "\n") + sys.stdout.write(self.name + " " + str(self.tid) + " " + str(self.pos) + " " + str(self.alen) + " " + str( + self.is_unmapped) + " " + str(self.proper_alignment) + " " + str(self.flag) + " " + str( + self.secondary) + "\n") + class SamIter: def __init__(self, sam): @@ -141,25 +149,25 @@ def __next__(self): return self.next() def next(self): - while self.iter_index < self.sam.NumEntries(): + while self.iter_index < self.sam.NumEntries(): entry = self.sam.entries[self.iter_index] tid = self.sam.target_map[entry.target_name] - sam_iterator = SAMEntryInfo(tid, entry.target_name, entry.pos, entry.alen, entry.seq, entry.flag, entry.query_name, entry.qual, entry.cigar) + sam_iterator = SAMEntryInfo(tid, entry.target_name, entry.pos, entry.alen, entry.seq, entry.flag, + entry.query_name, entry.qual, entry.cigar) self.iter_index += 1 if not sam_iterator.secondary: return sam_iterator raise StopIteration() - class Samfile: - headers = list() # is not used - queries = list() # is not used - targets = list() # lines corresponding to references. Can be parsed - programs = list() # is not used - comments = list() # something strange - entries = list() # list of SAM_entry objects - target_map = dict() # map "target" -> index + headers = list() # is not used + queries = list() # is not used + targets = list() # lines corresponding to references. 
Can be parsed + programs = list() # is not used + comments = list() # something strange + entries = list() # list of SAM_entry objects + target_map = dict() # map "target" -> index # auxiliaries sam_config = SAM_Config() @@ -193,7 +201,6 @@ def UpdateTargetFields(self, line): target_name = target_name[len(self.sam_config.sq_tname_prefix):] self.target_map[target_name] = len(self.targets) - 1 - def InitFields(self): self.targets = list() self.headers = list() @@ -223,7 +230,7 @@ def __init__(self, filename): self.target_map["*"] = -1 for line in lines: line = line.strip() - + # line is reference sequence dictionary if self.IsLineReferenceDescr(line): self.UpdateTargetFields(line) @@ -255,11 +262,13 @@ def __iter__(self): def gettid(self, tname): return self.target_map[tname] + def chain_iter(iterators): for it in iterators: for element in it: yield element + class SamChain: def __init__(self, sam_files): self.sam_files = sam_files @@ -273,14 +282,13 @@ def gettid(self, tname): return sam.gettid(tname) return None - ############################# test -#sam_file = "example.sam" -#sam_parser = Samfile(sam_file) -#sam_parser.PrintStats() -#i = 0 -#for e in sam_parser: +# sam_file = "example.sam" +# sam_parser = Samfile(sam_file) +# sam_parser.PrintStats() +# i = 0 +# for e in sam_parser: # if i >= 5: # break # e.Print() diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/executors/executor_local.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/executors/executor_local.py new file mode 100644 index 0000000..43ece59 --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/executors/executor_local.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python + +############################################################################ +# Copyright (c) 2019 Saint Petersburg State University +# All Rights Reserved +# See file LICENSE for details. +############################################################################ + +import os +import shutil +import support +import executors +import commands_parser +import options_storage + + +class Executor(executors.ExecutorBase): + def __init__(self, log): + super(Executor, self).__init__(log) + + def execute(self, commands): + for num in range(len(commands)): + command = commands[num] + if options_storage.args.continue_mode: + stage_checkpoint_path = options_storage. get_stage_filename(num, command.short_name) + if os.path.isfile(stage_checkpoint_path) and \ + ("_start" not in command.short_name) and \ + ("_finish" not in command.short_name): + self.log.info("===== Skipping %s (already processed)" % command.STAGE) + continue + + if "_finish" not in command.short_name: + self.log.info("\n===== %s started. \n" % command.STAGE) + + if command.__str__() != "true": + self.log.info("\n== Running: %s\n" % command.__str__()) + command.run(self.log) + + self.rm_files(command) + self.check_output(command) + + if "_start" not in command.short_name: + self.log.info("\n===== %s finished. \n" % command.STAGE) + + self.touch_file(command, num) + if options_storage.args.stop_after == command.short_name or \ + ("_finish" in command.short_name and + options_storage.args.stop_after == command.short_name.split('_')[0]): + self.log.info("\n======= Skipping the rest of SPAdes " + "pipeline (--stop-after was set to '%s'). 
" + "You can continue later with --continue or " + "--restart-from options\n" % options_storage.args.stop_after) + break + + def rm_files(self, command): + for fpath in command.del_after: + if os.path.isdir(fpath): + shutil.rmtree(fpath) + elif os.path.isfile(fpath): + os.remove(fpath) + + def check_output(self, command): + for fpath in command.output_files: + if not os.path.isfile(fpath): + support.error(command.STAGE + " finished abnormally: %s not found!" % fpath) + + def dump_commands(self, commands, outputfile): + commands_parser.write_commands_to_sh(commands, outputfile) + + def touch_file(self, command, num): + path = options_storage.get_stage_filename(num, command.short_name) + if not os.path.exists(os.path.dirname(path)): + os.makedirs(os.path.dirname(path)) + open(path, 'a').close() \ No newline at end of file diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/executors/executor_save_yaml.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/executors/executor_save_yaml.py new file mode 100644 index 0000000..d4488fb --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/executors/executor_save_yaml.py @@ -0,0 +1,30 @@ +#!/usr/bin/env python + +############################################################################ +# Copyright (c) 2019 Saint Petersburg State University +# All Rights Reserved +# See file LICENSE for details. +############################################################################ + +import os +import executors +import commands_parser +import options_storage + + +class Executor(executors.ExecutorBase): + def __init__(self, log): + super(Executor, self).__init__(log) + + def execute(self, commands): + super(Executor, self).execute(commands) + commands_parser.write_commands_to_sh(commands, os.path.join(options_storage.args.output_dir, "run_spades.sh")) + commands_parser.write_commands_to_yaml(commands, + os.path.join(options_storage.args.output_dir, + "run_spades.yaml")) + + def dump_commands(self, commands, outputfile): + commands_parser.write_commands_to_sh(commands, outputfile) + + def touch_file(self, command): + pass diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/executors/executors.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/executors/executors.py new file mode 100644 index 0000000..645f3f0 --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/executors/executors.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python + +############################################################################ +# Copyright (c) 2019 Saint Petersburg State University +# All Rights Reserved +# See file LICENSE for details. 
+############################################################################ + +import os +from abc import ABCMeta, abstractmethod + +import options_storage +import support +import commands_parser + + +class ExecutorBase(object): + __metaclass__ = ABCMeta + + def __init__(self, log): + self.log = log + + @abstractmethod + def execute(self, commands): + pass + + @abstractmethod + def dump_commands(self, commands, outputfile): + pass + + @abstractmethod + def touch_file(self, command): + pass diff --git a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/lucigen_nxmate.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/lucigen_nxmate.py similarity index 82% rename from src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/lucigen_nxmate.py rename to src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/lucigen_nxmate.py index 6d147dd..a612532 100644 --- a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/lucigen_nxmate.py +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/lucigen_nxmate.py @@ -11,36 +11,37 @@ # by Scott Monsma, Copyright (c) Lucigen Corp July 2014 - based on NxSeqFOS-SplitBfa4.py # Splits 'mates_ICC4_' files into left and right insert sequences by finding the Junction Code(s) # usage: copy IlluminaNxSeqJunction-Split6.py and ParseFastq.py into a directory with your fastq files to process -#cd into directory with .py and .fastq -#make sure your read 1 filename contains '_R1_' and read 2 filename contains '_R2_' -#at command prompt type 'python IlluminaNxSeqJunction-Split7.py 'mates_ICC4_your-R1-filename.fastq' and hit enter -#split sequences are saved if longer than minseq -#output files are named 'R1_IJS7_mates_ICC4_your-R1-filename.fastq' and 'R2_IJS7_mates_ICC4_your-R2-filename.fastq' which are the trimmed mate pairs, and -#'unsplit_IJS7_yourfilename.fastq' which contains interleaved reads where no junction was found. - -#IlluminaChimera-Clean4 by Scott Monsma, Lucigen Corp Copyright (C) July 2014 +# cd into directory with .py and .fastq +# make sure your read 1 filename contains '_R1_' and read 2 filename contains '_R2_' +# at command prompt type 'python IlluminaNxSeqJunction-Split7.py 'mates_ICC4_your-R1-filename.fastq' and hit enter +# split sequences are saved if longer than minseq +# output files are named 'R1_IJS7_mates_ICC4_your-R1-filename.fastq' and 'R2_IJS7_mates_ICC4_your-R2-filename.fastq' which are the trimmed mate pairs, and +# 'unsplit_IJS7_yourfilename.fastq' which contains interleaved reads where no junction was found. 
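The chimera codes and junction adaptor described in the comments above are matched with the third-party `regex` module rather than the standard `re`, because only `regex` supports the fuzzy quantifier `{e<=N}` (allow up to N substitutions, insertions, or deletions inside the group). Note that a plain `regex.search` returns the leftmost acceptable match, not necessarily the best-scoring one. A small illustration with a synthetic read (not part of the patch):

```python
import regex  # third-party package ("pip install regex"); stdlib re has no fuzzy matching

# Junction pattern used in nx_seq_junction below: up to 4 errors tolerated.
jctstr = '(GTTCATCGTCAGGCCTGACGATGAAC){e<=4}'

read = 'ACGTACGTGTTCATCGTCAGGCCTGACGATGAACTTTT'  # made-up read with the junction embedded
m = regex.search(jctstr, read)
if m:
    start = m.span()[0]
    left_part = read[:start]  # kept as the trimmed left mate, exactly as in the code above
```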
+ +# IlluminaChimera-Clean4 by Scott Monsma, Lucigen Corp Copyright (C) July 2014 # usage: copy IlluminaChimera-Clean4.py and ParseFastq.py into a directory with your fastq file to process -#cd into directory with .py and .fastq -#at command prompt type 'python IlluminaChimera-Clean4.py yourfilename.fastq' and hit enter -#four new files will be created, 'mates_ICC4_your-R1-filename.fastq' and 'mates_ICC4_your-R2-filename.fastq' containing the -#true mate pairs with matching chimera codes, and 'non-mates_ICC4_your-R1-filename.fastq' and 'non-mates_ICC4_your-R2-filename.fastq' -#containing the chimera read pairs and unidentified read pairs +# cd into directory with .py and .fastq +# at command prompt type 'python IlluminaChimera-Clean4.py yourfilename.fastq' and hit enter +# four new files will be created, 'mates_ICC4_your-R1-filename.fastq' and 'mates_ICC4_your-R2-filename.fastq' containing the +# true mate pairs with matching chimera codes, and 'non-mates_ICC4_your-R1-filename.fastq' and 'non-mates_ICC4_your-R2-filename.fastq' +# containing the chimera read pairs and unidentified read pairs -import os -import time -import support import gzip import itertools +import os import sys +import time from site import addsitedir + +import support + import spades_init -import options_storage try: import regex except ImportError: - support.error("Can't process Lucigen NxMate reads! Python module regex is not installed!") + support.error("can't process Lucigen NxMate reads! Python module regex is not installed!") addsitedir(spades_init.ext_python_modules_home) if sys.version.startswith('2.'): @@ -48,10 +49,10 @@ elif sys.version.startswith('3.'): from joblib3 import Parallel, delayed - # CONSTANTS READS_PER_THREAD = 25000 -READS_PER_BATCH = READS_PER_THREAD * options_storage.threads # e.g. 100000 for 4 threads +THREADS = 1 +READS_PER_BATCH = READS_PER_THREAD * THREADS # e.g. 
100000 for 4 threads minseq = 25 # minimum length sequence to keep after trimming @@ -68,10 +69,11 @@ def __init__(self, filePath, headerSymbols=['@', '+']): rec is tuple: (seqHeader,seqStr,qualHeader,qualStr) """ + if filePath.endswith('.gz'): self._file = gzip.open(filePath) else: - self._file = open(filePath, 'rU') #filePath, 'rU') test with explicit filename + self._file = open(filePath, 'rU') # filePath, 'rU') test with explicit filename self._currentLineNumber = 0 self._hdSyms = headerSymbols @@ -104,20 +106,20 @@ def __next__(self): assert trues == 4, \ "** ERROR: It looks like I encountered a premature EOF or empty line.\n\ Please check FastQ file near line number %s (plus or minus ~4 lines) and try again**" % ( - self._currentLineNumber) + self._currentLineNumber) # -- Make sure we are in the correct "register" -- assert elemList[0].startswith(self._hdSyms[0]), \ "** ERROR: The 1st line in fastq element does not start with '%s'.\n\ Please check FastQ file near line number %s (plus or minus ~4 lines) and try again**" % ( - self._hdSyms[0], self._currentLineNumber) + self._hdSyms[0], self._currentLineNumber) assert elemList[2].startswith(self._hdSyms[1]), \ "** ERROR: The 3rd line in fastq element does not start with '%s'.\n\ Please check FastQ file near line number %s (plus or minus ~4 lines) and try again**" % ( - self._hdSyms[1], self._currentLineNumber) + self._hdSyms[1], self._currentLineNumber) # -- Make sure the seq line and qual line have equal lengths -- assert len(elemList[1]) == len(elemList[3]), "** ERROR: The length of Sequence data and Quality data of the last record aren't equal.\n\ Please check FastQ file near line number %s (plus or minus ~4 lines) and try again**" % ( - self._currentLineNumber) + self._currentLineNumber) # ++++ Return fatsQ data as tuple ++++ return tuple(elemList) @@ -168,11 +170,11 @@ def chimera_clean_process_batch(reads, csslist1, csslist2): processed_slag1 = [] processed_slag2 = [] - #rec is tuple: (seqHeader,seqStr,qualHeader,qualStr) + # rec is tuple: (seqHeader,seqStr,qualHeader,qualStr) for recR1, recR2 in reads: stats.readcounter += 1 - #check if rec.seqStr contains match to chimera pattern + # check if rec.seqStr contains match to chimera pattern for cssindex, css1 in enumerate(csslist1): m = regex.search(css1, recR1[1]) css2 = csslist2[cssindex] @@ -180,26 +182,26 @@ def chimera_clean_process_batch(reads, csslist1, csslist2): if m and n: # a true mate pair! 
write out to mates files stats.TOTALmatecounter += 1 - #NOTE TAKE THIS OPPORTUNITY TO RECORD CSS CODE AND TRUNCATE READS - #need to trim additional 9+4 nts from end of match to remove css, Bst, barcode (9) and CGAT (4) linker - stats.csscounter[cssindex] += 1 #increment the appropriate css counter + # NOTE TAKE THIS OPPORTUNITY TO RECORD CSS CODE AND TRUNCATE READS + # need to trim additional 9+4 nts from end of match to remove css, Bst, barcode (9) and CGAT (4) linker + stats.csscounter[cssindex] += 1 # increment the appropriate css counter R1matches = m.span() mend = R1matches[1] mend = mend + 13 mySeq = recR1[1] - myR1 = mySeq[mend:] #trim the left end off of Read1 + myR1 = mySeq[mend:] # trim the left end off of Read1 myQual1 = recR1[3] - myR1Qual = myQual1[mend:] #trim the left end off of Read1 quality string + myR1Qual = myQual1[mend:] # trim the left end off of Read1 quality string R2matches = n.span() nend = R2matches[1] nend = nend + 13 mySeq2 = recR2[1] - myR2 = mySeq2[nend:] #trim the left end off of Read2 + myR2 = mySeq2[nend:] # trim the left end off of Read2 myQual2 = recR2[3] - myR2Qual = myQual2[nend:] #trim the left end off of Read2 quality string + myR2Qual = myQual2[nend:] # trim the left end off of Read2 quality string - if (len(myR1) >= minseq) and (len(myR2) >= minseq): #and if one or other is too short, toss both + if (len(myR1) >= minseq) and (len(myR2) >= minseq): # and if one or other is too short, toss both stats.matecounter += 1 processed_out1.append([recR1[0], myR1, recR1[2], myR1Qual]) processed_out2.append([recR2[0], myR2, recR2[2], myR2Qual]) @@ -221,7 +223,7 @@ def chimera_clean(infilename1, infilename2, dst, log, silent=True): basename2 = os.path.basename(infilename2) if os.path.splitext(basename2)[1] == '.gz': basename2 = os.path.splitext(basename2)[0] - #open four outfiles + # open four outfiles outfilename1 = os.path.join(dst, 'mates_ICC4_' + basename1) outfile1 = open(outfilename1, 'w') @@ -234,7 +236,7 @@ def chimera_clean(infilename1, infilename2, dst, log, silent=True): slagfilename2 = os.path.join(dst, 'non-mates_ICC4_' + basename2) slagfile2 = open(slagfilename2, 'w') - #set up regular expression patterns for chimera codes- for illumin use the reverse complements of right codes + # set up regular expression patterns for chimera codes- for illumin use the reverse complements of right codes csslist1 = ['(TGGACTCCACTGTG){e<=1}', '(ACTTCGCCACTGTG){e<=1}', '(TGAGTCCCACTGTG){e<=1}', '(TGACTGCCACTGTG){e<=1}', '(TCAGGTCCACTGTG){e<=1}', '(ATGTCACCACTGTG){e<=1}', '(GTATGACCACTGTG){e<=1}', '(GTCTACCCACTGTG){e<=1}', '(GTTGGACCACTGTG){e<=1}', '(CGATTCCCACTGTG){e<=1}', '(GGTTACCCACTGTG){e<=1}', '(TCACCTCCACTGTG){e<=1}'] @@ -243,12 +245,12 @@ def chimera_clean(infilename1, infilename2, dst, log, silent=True): '(AACCTCCCAATGTG){e<=1}', '(ACAACTCCAATGTG){e<=1}', '(GTCTAACCAATGTG){e<=1}', '(TACACGCCAATGTG){e<=1}', '(GAGAACCCAATGTG){e<=1}', '(GAGATTCCAATGTG){e<=1}', '(GACCTACCAATGTG){e<=1}', '(AGACTCCCAATGTG){e<=1}'] - #PARSE both files in tuples of 4 lines + # PARSE both files in tuples of 4 lines parserR1 = ParseFastQ(infilename1) parserR2 = ParseFastQ(infilename2) all_stats = CleanStats() - n_jobs = options_storage.threads + n_jobs = THREADS while True: # prepare input reads1 = list(itertools.islice(parserR1, READS_PER_BATCH)) @@ -284,7 +286,7 @@ def chimera_clean(infilename1, infilename2, dst, log, silent=True): if all_stats.readcounter == 0: support.error("lucigen_nxmate.py, chimera_clean: error in input data! 
Number of processed reads is 0!", log) if not silent: - #print some stats + # print some stats percentmates = 100. * all_stats.matecounter / all_stats.readcounter percentslag = 100. * all_stats.slagcounter / all_stats.readcounter log.info("==== chimera_clean info: processing finished!") @@ -331,29 +333,29 @@ def nx_seq_junction_process_batch(reads, jctstr): m = regex.search(jctstr, recR1[1]) n = regex.search(jctstr, recR2[1]) - if m and n: #found jctstr in both reads; need to save left part of R1 and LEFT part of R2 + if m and n: # found jctstr in both reads; need to save left part of R1 and LEFT part of R2 stats.bothjctcounter += 1 matches = m.span() start = matches[0] - mySeq = recR1[1] #get the left part of Read1 + mySeq = recR1[1] # get the left part of Read1 myLeft = mySeq[:start] myQual = recR1[3] - myLeftQual = myQual[:start] #get left part of Read1 quality string + myLeftQual = myQual[:start] # get left part of Read1 quality string nmatches = n.span() nstart = nmatches[0] mySeq2 = recR2[1] - myRight2 = mySeq2[:nstart] #get left part of Read2 + myRight2 = mySeq2[:nstart] # get left part of Read2 myQual2 = recR2[3] - myRightQual2 = myQual2[:nstart] #get left part of Read2 quality string + myRightQual2 = myQual2[:nstart] # get left part of Read2 quality string - #only write out as split if both pieces are big enough + # only write out as split if both pieces are big enough if (len(myLeft) > minseq) and (len(myRight2) > minseq): stats.splitcounter += 1 stats.R1R2jctcounter += 1 processed_split1.append([recR1[0], myLeft, recR1[2], myLeftQual]) processed_split2.append([recR2[0], myRight2, recR2[2], myRightQual2]) - elif n: #junction only in R2, so save entire R1 and LEFT part of R2 IFF R2 long enough + elif n: # junction only in R2, so save entire R1 and LEFT part of R2 IFF R2 long enough nmatches = n.span() nstart = nmatches[0] mySeq2 = recR2[1] @@ -366,7 +368,7 @@ def nx_seq_junction_process_batch(reads, jctstr): processed_split1.append([recR1[0], recR1[1], recR1[2], recR1[3]]) stats.jctcounter += 1 stats.R2jctcounter += 1 - elif m: #junction only in R1, save left part of R1 and entire R2, IFF R1 is long enough + elif m: # junction only in R1, save left part of R1 and entire R2, IFF R1 is long enough matches = m.span() start = matches[0] mySeq = recR1[1] @@ -379,7 +381,7 @@ def nx_seq_junction_process_batch(reads, jctstr): processed_split2.append([recR2[0], recR2[1], recR2[2], recR2[3]]) stats.jctcounter += 1 stats.R1jctcounter += 1 - else: #no junctions, save for frag use, as is 'unsplit'; note this file will be interleaved R1 R2 R1 R2... + else: # no junctions, save for frag use, as is 'unsplit'; note this file will be interleaved R1 R2 R1 R2... 
processed_unsplit.append([recR1[0], recR1[1], recR1[2], recR1[3]]) processed_unsplit.append([recR2[0], recR2[1], recR2[2], recR2[3]]) return [processed_split1, processed_split2, processed_unsplit], stats @@ -394,7 +396,7 @@ def nx_seq_junction(infilename1, infilename2, dst, log, silent=True): basename2 = os.path.basename(infilename2) if os.path.splitext(basename2)[1] == '.gz': basename2 = os.path.splitext(basename2)[0] - #open three outfiles + # open three outfiles splitfilenameleft = os.path.join(dst, 'R1_IJS7_' + basename1) splitfile1 = open(splitfilenameleft, 'w') @@ -404,16 +406,16 @@ def nx_seq_junction(infilename1, infilename2, dst, log, silent=True): unsplitfilename = os.path.join(dst, 'unsplit_IJS7_' + basename1.replace('_R1_', '_R1R2_')) unsplitfile = open(unsplitfilename, 'w') - #jctstr = '(GGTTCATCGTCAGGCCTGACGATGAACC){e<=4}' # JS7 24/28 required results in ~92% detected in ion torrent + # jctstr = '(GGTTCATCGTCAGGCCTGACGATGAACC){e<=4}' # JS7 24/28 required results in ~92% detected in ion torrent # from NextClip: --adaptor_sequence GTTCATCGTCAGG -e --strict_match 22,11 --relaxed_match 20,10 eg strict 22/26 = 4 errors, relaxed 20/26 = 6 errors jctstr = '(GTTCATCGTCAGGCCTGACGATGAAC){e<=4}' # try 22/26 to match NextClip strict (e<=6 for relaxed) - #PARSE both files in tuples of 4 lines + # PARSE both files in tuples of 4 lines parserR1 = ParseFastQ(infilename1) parserR2 = ParseFastQ(infilename2) all_stats = JunctionStats() - n_jobs = options_storage.threads + n_jobs = THREADS while True: # prepare input reads1 = list(itertools.islice(parserR1, READS_PER_BATCH)) @@ -448,7 +450,7 @@ def nx_seq_junction(infilename1, infilename2, dst, log, silent=True): if all_stats.splitcounter == 0: support.error("lucigen_nxmate.py, nx_seq_junction: error in input data! Number of split pairs is 0!", log) if not silent: - #print some stats + # print some stats percentsplit = 100 * all_stats.splitcounter / all_stats.readcounter percentR1R2 = 100 * all_stats.R1R2jctcounter / all_stats.splitcounter percentR1 = 100 * all_stats.R1jctcounter / all_stats.splitcounter @@ -470,9 +472,18 @@ def nx_seq_junction(infilename1, infilename2, dst, log, silent=True): return splitfilenameleft, splitfilenameright, unsplitfilename -def process_reads(left_reads_fpath, right_reads_fpath, dst, log): +def process_reads(left_reads_fpath, right_reads_fpath, dst, log, threads): + left_reads_fpath = left_reads_fpath.strip() + right_reads_fpath = right_reads_fpath.strip() + + global READS_PER_BATCH + global THREADS + THREADS = threads + READS_PER_BATCH = READS_PER_THREAD * threads # e.g. 
100000 for 4 threads + log.info("== Processing Lucigen NxMate reads (" + left_reads_fpath + " and " + os.path.basename(right_reads_fpath) + " (results are in " + dst + " directory)") cleaned_filename1, cleaned_filename2 = chimera_clean(left_reads_fpath, right_reads_fpath, dst, log, silent=False) - split_filename1, split_filename2, unsplit_filename = nx_seq_junction(cleaned_filename1, cleaned_filename2, dst, log, silent=False) + split_filename1, split_filename2, unsplit_filename = nx_seq_junction(cleaned_filename1, cleaned_filename2, dst, log, + silent=False) return split_filename1, split_filename2, unsplit_filename diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/options_parser.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/options_parser.py new file mode 100644 index 0000000..75dd988 --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/options_parser.py @@ -0,0 +1,1167 @@ +#!/usr/bin/env python + +############################################################################ +# Copyright (c) 2019 Saint Petersburg State University +# All Rights Reserved +# See file LICENSE for details. +############################################################################ + +import os +import sys +import argparse +from gettext import gettext +from os.path import basename +from os.path import abspath, expanduser + +import support +import options_storage +from process_cfg import empty_config + +def get_mode(): + mode = None + script_basename = basename(options_storage.first_command_line[0]) + options = options_storage.first_command_line + + mode_parser = argparse.ArgumentParser(add_help=False) + mode_parser.add_argument("--isolate", dest="isolate", action="store_true") + mode_parser.add_argument("--rna", dest="rna", action="store_true") + mode_parser.add_argument("--plasmid", dest="plasmid", action="store_true") + mode_parser.add_argument("--meta", dest="meta", action="store_true") + mode_parser.add_argument("--bio", dest="bio", action="store_true") + nargs, unknown_args = mode_parser.parse_known_args(options) + + if script_basename == "rnaspades.py" or nargs.rna: + mode = "rna" + elif script_basename == "plasmidspades.py" or nargs.plasmid: + mode = "plasmid" + elif nargs.bio: + mode = "bgc" + elif script_basename == "metaspades.py" or nargs.meta: + mode = "meta" + if script_basename == "metaplasmidspades.py" or (nargs.plasmid and nargs.meta): + mode = "metaplasmid" + return mode + + +def add_mode_to_args(args): + mode = get_mode() + if mode == "rna": + args.rna = True + elif mode == "plasmid": + args.plasmid = True + elif mode == "bgc": + args.meta = True + args.bio = True + elif mode == "meta": + args.meta = True + elif mode == "metaplasmid": + args.meta = True + args.plasmid = True + + + +def version(): + mode = get_mode() + ver = "SPAdes genome assembler v%s" % options_storage.spades_version + if mode is not None: + ver += " [%sSPAdes mode]" % mode + return ver + + +class SpadesHelpFormatter(argparse.HelpFormatter): + def __init__(self, prog, indent_increment=2, max_help_position=30, width=100): + super(SpadesHelpFormatter, self).__init__(prog, indent_increment, max_help_position, width) + + def _split_lines(self, text, width): + return text.splitlines() + + def _format_usage(self, usage, actions, group, prefix=None): + if prefix is None: + prefix = gettext(version() + "\n\nUsage: ") + return argparse.HelpFormatter._format_usage(self, usage, actions, group, prefix) + + +def init_dataset_data(): + return dict() + + +class 
AddToDatasetAction(argparse.Action): + def __init__(self, option_strings, dest, nargs=None, const=None, default=None, type=None, choices=None, + required=False, help=None, metavar=None): + super(AddToDatasetAction, self).__init__(option_strings, dest, nargs, const, default, type, choices, required, + help, metavar) + + def __call__(self, parser, namespace, values, option_string=None): + if option_string == "-s": + support.old_style_single_reads = True + if option_string not in options_storage.OLD_STYLE_READS_OPTIONS: + support.only_old_style_options = False + + # create dataset_data if don't exsist + if not "dataset_data" in namespace: + dataset_data = init_dataset_data() + setattr(namespace, "dataset_data", dataset_data) + + # transfer new format to old + arg = "" + if len(values) == 2: + opt = "--" + option_string.split('-')[2] + values[0] + if len(option_string.split('-')) > 3: + if option_string.split('-')[-1] == "or": + opt += "-" + values[1] + else: + opt += "-" + option_string.split('-')[-1] + arg = values[-1] + else: + arg = values[-1] + else: + opt = option_string + if len(values) > 0: + arg = values[0] + + # add to dataset for old format + support.add_to_dataset(opt, arg, namespace.dataset_data) + + +class StoreUniqueAction(argparse.Action): + def __init__(self, option_strings, dest, nargs=None, const=None, default=None, type=None, choices=None, + required=False, help=None, metavar=None): + super(StoreUniqueAction, self).__init__(option_strings=option_strings, dest=dest, nargs=nargs, const=const, + default=default, type=type, choices=choices, required=required, + help=help, metavar=metavar) + + def __call__(self, parser, namespace, values, option_string=None): + if namespace.__dict__[self.dest] is not None: + raise argparse.ArgumentError(self, "option was specified at least twice") + setattr(namespace, self.dest, values) + + +class ConcatenationAction(argparse.Action): + def __init__(self, option_strings, dest, nargs=None, const=None, default=None, type=None, choices=None, + required=False, help=None, metavar=None): + super(ConcatenationAction, self).__init__(option_strings=option_strings, dest=dest, nargs=nargs, const=const, + default=default, type=type, choices=choices, required=required, + help=help, metavar=metavar) + + def __call__(self, parser, namespace, values, option_string=None): + values = [x for outer in values for x in outer] + if len(values) == 1 and values[0] == "auto": + values = values[0] + elif len(values) > 1 and "auto" in values: + raise argparse.ArgumentError(self, "cann't set 'auto' and kmers' size at the same time") + setattr(namespace, self.dest, values) + + +def kmer(arg): + if arg == "auto": + return [arg] + else: + k = int(arg) + if k < options_storage.MIN_K or k > options_storage.MAX_K: + raise argparse.ArgumentTypeError("wrong k value %d: all k values should be between %d and %d" % + (k, options_storage.MIN_K, options_storage.MAX_K)) + if k % 2 == 0: + raise argparse.ArgumentTypeError("wrong k value %d: all k values should be odd" % k) + return [k] + + +def kmers(arg): + k_mers = arg + if k_mers[-1] == ',': + k_mers = k_mers[:-1] + k_mers = k_mers.split(",") + for i in range(len(k_mers)): + k_mers[i] = kmer(k_mers[i])[0] + return k_mers + + +def qvoffset(arg): + if arg == "auto": + return arg + else: + return int(arg) + + +def cov_cutoff(arg): + if arg == "auto" or arg == "off": + return arg + elif support.is_float(arg) and float(arg) > 0.0: + return float(arg) + else: + raise argparse.ArgumentTypeError("wrong value %s (should be a positive float 
number, or 'auto', or 'off')" % arg) + + +def lcer_cutoff(arg): + if support.is_float(arg) and float(arg) > 0.0: + return float(arg) + else: + raise argparse.ArgumentTypeError("wrong value %s (should be a positive float number)" % arg) + + +def restart_from(arg): + if arg not in options_storage.SHORT_STAGES_NAME and arg != options_storage.LAST_STAGE and not arg.startswith("k"): + raise argparse.ArgumentTypeError("wrong value %s (should be 'ec', 'as', 'k', 'mc', or '%s')" % (arg, options_storage.LAST_STAGE)) + return arg + + +def stop_after(arg): + if arg not in options_storage.SHORT_STAGES_NAME and not arg.startswith("k"): + raise argparse.ArgumentTypeError("wrong value %s (should be 'ec', 'as', 'k', or 'mc')" % arg) + return arg + + +def read_cov_threshold(arg): + if support.is_int(arg) and int(arg) >= 0: + return int(arg) + else: + raise argparse.ArgumentTypeError("wrong value %s (should be a non-negative integer number)" % arg) + + +def add_deprecated_input_data_args(pgroup_input_data): + for num in range(1, 10): + for sufix in ["-12", "-1", "-2", "-s"]: + pgroup_input_data.add_argument("--pe%d%s" % (num, sufix), + metavar="", + nargs=1, + help=argparse.SUPPRESS, + action=AddToDatasetAction) + pgroup_input_data.add_argument("--mp%d%s" % (num, sufix), + metavar="", + nargs=1, + help=argparse.SUPPRESS, + action=AddToDatasetAction) + pgroup_input_data.add_argument("--hqmp%d%s" % (num, sufix), + metavar="", + nargs=1, + help=argparse.SUPPRESS, + action=AddToDatasetAction) + + for orientation in ["-fr", "-rf", "-ff"]: + pgroup_input_data.add_argument("--pe%d%s" % (num, orientation), + nargs=0, + help=argparse.SUPPRESS, + action=AddToDatasetAction) + pgroup_input_data.add_argument("--mp%d%s" % (num, orientation), + nargs=0, + help=argparse.SUPPRESS, + action=AddToDatasetAction) + pgroup_input_data.add_argument("--hqmp%d%s" % (num, orientation), + nargs=0, + help=argparse.SUPPRESS, + action=AddToDatasetAction) + + pgroup_input_data.add_argument("--s%d" % num, + metavar="", + nargs=1, + help=argparse.SUPPRESS, + action=AddToDatasetAction) + pgroup_input_data.add_argument("--pe%d-m" % num, + metavar="", + nargs=1, + help=argparse.SUPPRESS, + action=AddToDatasetAction) + pgroup_input_data.add_argument("--nxmate%d-1" % num, + metavar="", + nargs=1, + help=argparse.SUPPRESS, + action=AddToDatasetAction) + pgroup_input_data.add_argument("--nxmate%d-2" % num, + metavar="", + nargs=1, + help=argparse.SUPPRESS, + action=AddToDatasetAction) + + +def add_basic_args(pgroup_basic): + mode = get_mode() + pgroup_basic.add_argument("-o", + metavar="", + help="directory to store all the resulting files (required)", + type=str, + default=None, + dest="output_dir", + action=StoreUniqueAction) + + help_hidden = (mode is not None) + pgroup_basic.add_argument("--isolate", + dest="isolate", + help="this flag is highly recommended for high-coverage isolate and multi-cell data" + if not help_hidden else argparse.SUPPRESS, + action="store_true") + pgroup_basic.add_argument("--sc", + dest="single_cell", + help="this flag is required for MDA (single-cell) data" + if not help_hidden else argparse.SUPPRESS, + action="store_true") + pgroup_basic.add_argument("--meta", + dest="meta", + help="this flag is required for metagenomic sample data" + if not help_hidden else argparse.SUPPRESS, + action="store_true") + pgroup_basic.add_argument("--bio", + dest="bio", + help="this flag is required for biosyntheticSPAdes mode" + if not help_hidden else argparse.SUPPRESS, + action="store_true") + pgroup_basic.add_argument("--rna", + 
dest="rna", + help="this flag is required for RNA-Seq data" + if not help_hidden else argparse.SUPPRESS, + action="store_true") + pgroup_basic.add_argument("--plasmid", + dest="plasmid", + help="runs plasmidSPAdes pipeline for plasmid detection" + if not help_hidden else argparse.SUPPRESS, + action="store_true") + pgroup_basic.add_argument("--iontorrent", + dest="iontorrent", + help="this flag is required for IonTorrent data", + action="store_true") + pgroup_basic.add_argument("--test", + dest="test_mode", + help="runs SPAdes on toy dataset", + action="store_true") + pgroup_basic.add_argument("-h", "--help", + help="prints this usage message", + action="help") + pgroup_basic.add_argument("-v", "--version", + help="prints version", + action="version", + version=version()) + + +def add_library_args(libid, name, suffixes, pgroup_input_data, help_hidden=False): + if "12" in suffixes: + pgroup_input_data.add_argument("--%s-12" % libid, + metavar=("<#>", ""), + nargs=2, + help="file with interlaced reads for %s library number <#>.\n" + "Older deprecated syntax is -%s<#>-12 " % (name, libid) + if not help_hidden else argparse.SUPPRESS, + action=AddToDatasetAction) + if "1" in suffixes: + pgroup_input_data.add_argument("--%s-1" % libid, metavar=("<#>", ""), + nargs=2, + help="file with forward reads for %s library number <#>.\n" + "Older deprecated syntax is -%s<#>-1 " % (name, libid) + if not help_hidden else argparse.SUPPRESS, + action=AddToDatasetAction) + + if "2" in suffixes: + pgroup_input_data.add_argument("--%s-2" % libid, + metavar=("<#>", ""), + nargs=2, + help="file with reverse reads for %s library number <#>.\n" + "Older deprecated syntax is -%s<#>-2 " % (name, libid) + if not help_hidden else argparse.SUPPRESS, + action=AddToDatasetAction) + + if "s" in suffixes: + pgroup_input_data.add_argument("--%s-s" % libid, + metavar=("<#>", ""), + nargs=2, + help="file with unpaired reads for %s library number <#>.\n" + "Older deprecated syntax is -%s<#>-s " % (name, libid) + if not help_hidden else argparse.SUPPRESS, + action=AddToDatasetAction) + + if "m" in suffixes: + pgroup_input_data.add_argument("--%s-m" % libid, + metavar=("<#>", ""), + nargs=2, + help="file with merged reads for %s library number <#>.\n" + "Older deprecated syntax is -%s<#>-m " % (name, libid) + if not help_hidden else argparse.SUPPRESS, + action=AddToDatasetAction) + + if "or" in suffixes: + pgroup_input_data.add_argument("--%s-or" % libid, + metavar=("<#>", ""), + nargs=2, + help="orientation of reads for %s library number <#> \n( = fr, rf, ff).\n" + "Older deprecated syntax is -%s<#>-" % (name, libid) + if not help_hidden else argparse.SUPPRESS, + action=AddToDatasetAction) + + +def add_input_data_args(pgroup_input_data): + mode = get_mode() + + pgroup_input_data.add_argument("--12", + metavar="", + nargs=1, + help="file with interlaced forward and reverse paired-end reads", + action=AddToDatasetAction) + pgroup_input_data.add_argument("-1", + metavar="", + nargs=1, + help="file with forward paired-end reads", + action=AddToDatasetAction) + pgroup_input_data.add_argument("-2", + metavar="", + nargs=1, + help="file with reverse paired-end reads", + action=AddToDatasetAction) + pgroup_input_data.add_argument("-s", + metavar="", + nargs=1, + help="file with unpaired reads", + action=AddToDatasetAction) + pgroup_input_data.add_argument("--merged", + metavar="", + nargs=1, + help="file with merged forward and reverse paired-end reads", + action=AddToDatasetAction) + + add_deprecated_input_data_args(pgroup_input_data) + 
help_hidden = (mode in ["rna", "meta"]) + add_library_args("pe", "paired-end", ["12", "1", "2", "s", "m", "or"], pgroup_input_data) + pgroup_input_data.add_argument("--s", + metavar=("<#>", ""), + nargs=2, + help="file with unpaired reads for single reads library number <#>.\n" + "Older deprecated syntax is --s<#> ", + action=AddToDatasetAction) + add_library_args("mp", "mate-pair", ["12", "1", "2", "s", "or"], pgroup_input_data, help_hidden) + add_library_args("hqmp", "high-quality mate-pair", ["12", "1", "2", "s", "or"], pgroup_input_data, help_hidden) + add_library_args("nxmate", "Lucigen NxMate", ["1", "2"], pgroup_input_data, help_hidden) + + pgroup_input_data.add_argument("--sanger", + metavar="", + nargs=1, + help="file with Sanger reads" + if not help_hidden else argparse.SUPPRESS, + action=AddToDatasetAction) + + help_hidden = (mode == "rna") + pgroup_input_data.add_argument("--pacbio", + metavar="", + nargs=1, + help="file with PacBio reads", + action=AddToDatasetAction) + pgroup_input_data.add_argument("--nanopore", + metavar="", + nargs=1, + help="file with Nanopore reads", + action=AddToDatasetAction) + + help_hidden = (mode == "rna") + pgroup_input_data.add_argument("--tslr", + metavar="", + nargs=1, + help="file with TSLR-contigs" + if not help_hidden else argparse.SUPPRESS, + action=AddToDatasetAction) + + help_hidden = (mode == "meta") + pgroup_input_data.add_argument("--trusted-contigs", + metavar="", + nargs=1, + help="file with trusted contigs" + if not help_hidden else argparse.SUPPRESS, + action=AddToDatasetAction) + + pgroup_input_data.add_argument("--untrusted-contigs", + metavar="", + nargs=1, + help="file with untrusted contigs" + if not help_hidden else argparse.SUPPRESS, + action=AddToDatasetAction) + + help_hidden = (mode != "rna") + pgroup_input_data.add_argument("--fl-rna", + metavar="", + nargs=1, + help="file with PacBio/Nanopore/contigs that capture full-length transcripts" + if not help_hidden else argparse.SUPPRESS, + action=AddToDatasetAction) + pgroup_input_data.add_argument("--ss", + metavar="", + dest="strand_specificity", + choices=["fr", "rf"], + help="strand specific data, = fr (normal) and rf (antisense).\n" + "Older deprecated syntax is --ss-" + if not help_hidden else argparse.SUPPRESS, + action="store") + + pgroup_input_data.add_argument("--ss-fr", + metavar="", + dest="strand_specificity", + const="fr", + help=argparse.SUPPRESS, + action="store_const") + + pgroup_input_data.add_argument("--ss-rf", + dest="strand_specificity", + const="rf", + help=argparse.SUPPRESS, + action="store_const") + +def add_pipeline_args(pgroup_pipeline): + mode = get_mode() + help_hidden = (mode == "rna") + pgroup_pipeline.add_argument("--only-error-correction", + dest="only_error_correction", + default=None, + help="runs only read error correction (without assembling)" + if not help_hidden else argparse.SUPPRESS, + action="store_true") + pgroup_pipeline.add_argument("--only-assembler", + dest="only_assembler", + default=None, + help="runs only assembling (without read error correction)" + if not help_hidden else argparse.SUPPRESS, + action="store_true") + + help_hidden = (mode in ["rna", "meta"]) + careful_group = pgroup_pipeline.add_mutually_exclusive_group() + careful_group.add_argument("--careful", + dest="careful", + default=None, + help="tries to reduce number of mismatches and short indels" + if not help_hidden else argparse.SUPPRESS, + action="store_true") + careful_group.add_argument("--careful:false", + dest="careful", + default=None, + 
help=argparse.SUPPRESS, + action="store_false") + pgroup_pipeline.add_argument("--checkpoints", + metavar="", + dest="checkpoints", + help="save intermediate check-points ('last', 'all')", + action="store") + pgroup_pipeline.add_argument("--continue", + dest="continue_mode", + help="continue run from the last available check-point", + action="store_true") + + restart_from_help = "restart run with updated options and from the specified check-point\n" \ + "('ec', 'as', 'k', 'mc', '%s')" % options_storage.LAST_STAGE + if mode == "rna": + restart_from_help = "restart run with updated options and from the specified check-point\n" \ + "('as', 'k', '%s')" % options_storage.LAST_STAGE + pgroup_pipeline.add_argument("--restart-from", + metavar="", + dest="restart_from", + default=None, + type=restart_from, + help=restart_from_help, + action="store") + + disable_gzip_output_group = pgroup_pipeline.add_mutually_exclusive_group() + disable_gzip_output_group.add_argument("--disable-gzip-output", + dest="disable_gzip_output", + default=None, + help="forces error correction not to compress the corrected reads", + action="store_true") + disable_gzip_output_group.add_argument("--disable-gzip-output:false", + dest="disable_gzip_output", + default=None, + help=argparse.SUPPRESS, + action="store_false") + + disable_rr = pgroup_pipeline.add_mutually_exclusive_group() + disable_rr.add_argument("--disable-rr", + dest="disable_rr", + default=None, + help="disables repeat resolution stage of assembling", + action="store_true") + disable_rr.add_argument("--disable-rr:false", + dest="disable_rr", + default=None, + help=argparse.SUPPRESS, + action="store_false") + + +def add_advanced_args(pgroup_advanced): + mode = get_mode() + pgroup_advanced.add_argument("--dataset", + metavar="", + type=support.check_file_existence, + dest="dataset_yaml_filename", + help="file with dataset description in YAML format", + action="store") + + pgroup_advanced.add_argument("-t", "--threads", + metavar="", + dest="threads", + type=int, + help="number of threads. [default: %s]\n" % options_storage.THREADS, + action="store") + + pgroup_advanced.add_argument("-m", "--memory", + metavar="", + type=int, + dest="memory", + help="RAM limit for SPAdes in Gb (terminates if exceeded). [default: %s]\n" % options_storage.MEMORY, + action="store") + pgroup_advanced.add_argument("--tmp-dir", + metavar="", + help="directory for temporary files. 
[default: /tmp]", + dest="tmp_dir", + action="store") + + pgroup_advanced.add_argument("-k", + metavar="", + dest="k_mers", + nargs='+', + type=kmers, + help="list of k-mer sizes (must be odd and less than %d)\n" + "[default: 'auto']" % (options_storage.MAX_K + 1), + action=ConcatenationAction) + + help_hidden = (mode in ["rna", "meta"]) + pgroup_advanced.add_argument("--cov-cutoff", + metavar="", + type=cov_cutoff, + default=None, + dest="cov_cutoff", + help="coverage cutoff value (a positive float number, " + "or 'auto', or 'off')\n[default: 'off']" + if not help_hidden else argparse.SUPPRESS, + action="store") + + pgroup_advanced.add_argument("--phred-offset", + metavar="<33 or 64>", + dest="qvoffset", + type=qvoffset, + help="PHRED quality offset in the input reads (33 or 64),\n" + "[default: auto-detect]", + action="store") + + +def add_hidden_args(pgroup_hidden): + show_help_hidden = ("--help-hidden" in sys.argv) + + debug_group = pgroup_hidden.add_mutually_exclusive_group() + debug_group.add_argument("--debug", + dest="developer_mode", + default=None, + help="runs SPAdes in debug mode" + if show_help_hidden else argparse.SUPPRESS, + action="store_true") + debug_group.add_argument("--debug:false", + dest="developer_mode", + default=None, + help=argparse.SUPPRESS, + action="store_false") + + pgroup_hidden.add_argument("--stop-after", + metavar="", + dest="stop_after", + type=stop_after, + help="runs SPAdes until the specified check-point ('ec', 'as', 'k', 'mc') inclusive" + if show_help_hidden else argparse.SUPPRESS, + action="store") + pgroup_hidden.add_argument("--truseq", + dest="truseq_mode", + default=None, + help="runs SPAdes in TruSeq mode" + if show_help_hidden else argparse.SUPPRESS, + action="store_true") + + mismatch_correction_group = pgroup_hidden.add_mutually_exclusive_group() + mismatch_correction_group.add_argument("--mismatch-correction", + dest="mismatch_corrector", + default=None, + help="runs post processing correction of mismatches and short indels" + if show_help_hidden else argparse.SUPPRESS, + action="store_true") + mismatch_correction_group.add_argument("--mismatch-correction:false", + dest="mismatch_corrector", + default=None, + help=argparse.SUPPRESS, + action="store_false") + + pgroup_hidden.add_argument("--reference", + metavar="", + dest="reference", + type=support.check_file_existence, + help="file with reference for deep analysis (only in debug mode)" + if show_help_hidden else argparse.SUPPRESS, + action="store") + pgroup_hidden.add_argument("--series-analysis", + metavar="", + dest="series_analysis", + type=support.check_file_existence, + help="config for metagenomics-series-augmented reassembly" + if show_help_hidden else argparse.SUPPRESS, + action="store") + pgroup_hidden.add_argument("--configs-dir", + metavar="", + dest="configs_dir", + type=support.check_dir_existence, + help="directory with configs" + if show_help_hidden else argparse.SUPPRESS, + action="store") + pgroup_hidden.add_argument("--read-buffer-size", + metavar="", + dest="read_buffer_size", + type=int, + help="sets size of read buffer for graph construction" + if show_help_hidden else argparse.SUPPRESS, + action="store") + pgroup_hidden.add_argument("--large-genome", + dest="large_genome", + default=False, + help="Enables optimizations for large genomes" + if show_help_hidden else argparse.SUPPRESS, + action="store_true") + pgroup_hidden.add_argument("--save-gp", + dest="save_gp", + default=None, + help="Enables saving graph pack before repeat resolution (even without --debug)" + if 
show_help_hidden else argparse.SUPPRESS, + action="store_true") + pgroup_hidden.add_argument("--hidden-cov-cutoff", + metavar="", + type=lcer_cutoff, + dest="lcer_cutoff", + help="coverage cutoff value deeply integrated in simplification" \ + " (a positive float number). Base coverage! Will be adjusted depending on K and RL!" + if show_help_hidden else argparse.SUPPRESS, + action="store") + pgroup_hidden.add_argument("--read-cov-threshold", + metavar="", + dest="read_cov_threshold", + type=read_cov_threshold, + help="read median coverage threshold (non-negative integer)" + if show_help_hidden else argparse.SUPPRESS, + action="store") + pgroup_hidden.add_argument("--only-generate-config", + dest="only_generate_config", + help="generate configs and print script to run_spades.sh" + if show_help_hidden else argparse.SUPPRESS, + action="store_true") + pgroup_hidden.add_argument("--help-hidden", + help="prints this usage message with all hidden options" + if show_help_hidden else argparse.SUPPRESS, + action="help") + + +def create_parser(): + parser = argparse.ArgumentParser(prog="spades.py", formatter_class=SpadesHelpFormatter, + usage="%(prog)s [options] -o ", add_help=False) + + #pgroup for parser group + pgroup_basic = parser.add_argument_group("Basic options") + pgroup_input_data = parser.add_argument_group("Input data") + pgroup_pipeline = parser.add_argument_group("Pipeline options") + pgroup_advanced = parser.add_argument_group("Advanced options") + pgroup_hidden = parser.add_argument_group("Hidden options") + + add_basic_args(pgroup_basic) + add_input_data_args(pgroup_input_data) + add_pipeline_args(pgroup_pipeline) + add_advanced_args(pgroup_advanced) + add_hidden_args(pgroup_hidden) + + return parser + + +def check_options_for_restart_from(log): + if options_storage.args.dataset_yaml_filename: + support.error("you cannot specify --dataset with --restart-from option!", log) + if options_storage.args.single_cell: + support.error("you cannot specify --sc with --restart-from option!", log) + if options_storage.args.meta: + support.error("you cannot specify --meta with --restart-from option!", log) + if options_storage.args.plasmid: + support.error("you cannot specify --plasmid with --restart-from option!", log) + if options_storage.args.rna: + support.error("you cannot specify --rna with --restart-from option!", log) + if options_storage.args.isolate: + support.error("you cannot specify --isolate with --restart-from option!", log) + if options_storage.args.iontorrent: + support.error("you cannot specify --iontorrent with --restart-from option!", log) + if options_storage.args.only_assembler: + support.error("you cannot specify --only-assembler with --restart-from option!", log) + if options_storage.args.only_error_correction: + support.error("you cannot specify --only-error-correction with --restart-from option!", log) + if options_storage.args.strand_specificity is not None: + support.error("you cannot specify strand specificity (--ss-rf or --ss-fr) with --restart-from option!", log) + +def add_to_option(args, log, skip_output_dir): + if args.restart_from: + check_options_for_restart_from(log) + + add_mode_to_args(options_storage.args) + + if args.test_mode: + if not skip_output_dir: + if "output_dir" in options_storage.args and options_storage.args.output_dir is not None: + support.error("you cannot specify -o and --test simultaneously") + options_storage.args.output_dir = os.path.abspath("spades_test") + else: + if "output_dir" not in options_storage.args or 
options_storage.args.output_dir is None: + support.error("the output_dir is not set! It is a mandatory parameter (-o output_dir).") + + if not skip_output_dir: + output_dir = abspath(expanduser(args.output_dir)) + options_storage.dict_of_rel2abs[args.output_dir] = output_dir + support.check_path_is_ascii(output_dir, "output directory") + args.output_dir = output_dir + + if args.tmp_dir is not None: + tmp_dir = abspath(expanduser(args.tmp_dir)) + options_storage.dict_of_rel2abs[args.tmp_dir] = tmp_dir + support.check_path_is_ascii(tmp_dir, "directory for temporary files") + args.tmp_dir = tmp_dir + + if "reference" in args and args.reference is not None: + args.developer_mode = True + + if args.only_assembler and args.only_error_correction: + support.error("you cannot specify --only-error-correction and --only-assembler simultaneously") + + if args.rna and args.only_error_correction: + support.error("you cannot specify --only-error-correction in RNA-seq mode!", log) + + if args.isolate and args.only_error_correction: + support.error("you cannot specify --only-error-correction in isolate mode!", log) + + if args.careful == False and args.mismatch_corrector == True: + support.error("you cannot specify --mismatch-correction and --careful:false simultaneously") + + if args.careful == True and args.mismatch_corrector == False: + support.error("you cannot specify --mismatch-correction:false and --careful simultaneously") + + if args.rna and (args.careful or args.mismatch_corrector): + support.error("you cannot specify --mismatch-correction or --careful in RNA-seq mode!", log) + + if args.isolate and (args.careful or args.mismatch_corrector): + support.error("you cannot specify --mismatch-correction or --careful in isolate mode!", log) + + if args.only_assembler and args.isolate: + support.warning("Isolate mode already implies --only-assembler, so this option has no effect.") + + if args.restart_from is not None: + args.continue_mode = True + if args.careful is not None: + args.mismatch_corrector = args.careful + if args.truseq_mode: + enable_truseq_mode() + if (args.isolate or args.rna) and not args.iontorrent: + args.only_assembler = True + + +def add_to_cfg(cfg, log, bin_home, spades_home, args): + ### FILLING cfg + cfg["common"] = empty_config() + cfg["dataset"] = empty_config() + if not args.only_assembler: + cfg["error_correction"] = empty_config() + if not args.only_error_correction: + cfg["assembly"] = empty_config() + + # common + cfg["common"].__dict__["checkpoints"] = args.checkpoints + cfg["common"].__dict__["output_dir"] = args.output_dir + cfg["common"].__dict__["tmp_dir"] = args.tmp_dir + cfg["common"].__dict__["max_threads"] = args.threads + cfg["common"].__dict__["max_memory"] = args.memory + cfg["common"].__dict__["developer_mode"] = args.developer_mode + if args.series_analysis: + cfg["common"].__dict__["series_analysis"] = args.series_analysis + if args.bio: + biosyntheticspades_hmms_path = os.path.join(spades_home, options_storage.biosyntheticspades_hmms) + is_hmmfile = lambda hmmfile: os.path.isfile(os.path.join(biosyntheticspades_hmms_path, hmmfile)) \ + and (hmmfile.endswith("hmm") or hmmfile.endswith("hmm.gz")) + cfg["common"].__dict__["set_of_hmms"] = ",".join([os.path.join(biosyntheticspades_hmms_path, hmmfile) + for hmmfile in os.listdir(biosyntheticspades_hmms_path) + if is_hmmfile(hmmfile)]) + + # dataset section + cfg["dataset"].__dict__["yaml_filename"] = args.dataset_yaml_filename + if args.developer_mode and args.reference: + cfg["dataset"].__dict__["reference"] = 
args.reference + + # error correction + if not args.only_assembler: + cfg["error_correction"].__dict__["output_dir"] = os.path.join(cfg["common"].output_dir, "corrected") + cfg["error_correction"].__dict__["gzip_output"] = not args.disable_gzip_output + if args.qvoffset: + cfg["error_correction"].__dict__["qvoffset"] = args.qvoffset + cfg["error_correction"].__dict__["iontorrent"] = args.iontorrent + cfg["error_correction"].__dict__["max_iterations"] = options_storage.ITERATIONS + if args.meta or args.large_genome: + cfg["error_correction"].__dict__["count_filter_singletons"] = 1 + if args.read_buffer_size: + cfg["error_correction"].__dict__["read_buffer_size"] = args.read_buffer_size + + # assembly + if not args.only_error_correction: + if args.k_mers == "auto" and args.restart_from is None: + args.k_mers = None + if args.k_mers: + cfg["assembly"].__dict__["iterative_K"] = args.k_mers + elif args.rna: + cfg["assembly"].__dict__["iterative_K"] = "auto" + else: + cfg["assembly"].__dict__["iterative_K"] = options_storage.K_MERS_SHORT + cfg["assembly"].__dict__["disable_rr"] = args.disable_rr + cfg["assembly"].__dict__["cov_cutoff"] = args.cov_cutoff + cfg["assembly"].__dict__["lcer_cutoff"] = args.lcer_cutoff + cfg["assembly"].__dict__["save_gp"] = args.save_gp + if args.read_buffer_size: + cfg["assembly"].__dict__["read_buffer_size"] = args.read_buffer_size + cfg["assembly"].__dict__["correct_scaffolds"] = options_storage.correct_scaffolds + + # corrector can work only if contigs exist (not only error correction) + if (not args.only_error_correction) and args.mismatch_corrector: + cfg["mismatch_corrector"] = empty_config() + cfg["mismatch_corrector"].__dict__["skip-masked"] = None + cfg["mismatch_corrector"].__dict__["bwa"] = os.path.join(bin_home, "spades-bwa") + cfg["mismatch_corrector"].__dict__["threads"] = args.threads + cfg["mismatch_corrector"].__dict__["output-dir"] = args.output_dir + cfg["run_truseq_postprocessing"] = options_storage.run_truseq_postprocessing + + +def postprocessing(args, cfg, dataset_data, log, spades_home, load_processed_dataset, restart_from, options=None): + if sys.version.startswith("2."): + import pyyaml2 as pyyaml + elif sys.version.startswith("3."): + import pyyaml3 as pyyaml + + if args.test_mode: + if args.plasmid: + support.add_to_dataset("-1", os.path.join(spades_home, "test_dataset_plasmid/pl1.fq.gz"), dataset_data) + support.add_to_dataset("-2", os.path.join(spades_home, "test_dataset_plasmid/pl2.fq.gz"), dataset_data) + else: + support.add_to_dataset("-1", os.path.join(spades_home, "test_dataset/ecoli_1K_1.fq.gz"), dataset_data) + support.add_to_dataset("-2", os.path.join(spades_home, "test_dataset/ecoli_1K_2.fq.gz"), dataset_data) + + if args.bio: + args.meta = True + if not args.output_dir: + support.error("the output_dir is not set! 
It is a mandatory parameter (-o output_dir).", log) + if not os.path.isdir(args.output_dir): + if args.continue_mode: + support.error("the output_dir should exist for --continue and for --restart-from!", log) + os.makedirs(args.output_dir) + if args.restart_from or restart_from: + if args.continue_mode: # saving parameters specified with --restart-from + if not support.dataset_is_empty(dataset_data): + support.error("you cannot specify reads with --restart-from option!", log) + save_restart_options() + else: # overriding previous run parameters + load_restart_options() + elif args.continue_mode: # it is just --continue, NOT --restart-from + continue_parser = argparse.ArgumentParser(add_help=False) + continue_parser.add_argument("--continue", dest="continue_mode", action="store_true") + continue_parser.add_argument("-o", type=str, dest="output_dir", action=StoreUniqueAction) + nargs, unknown_args = continue_parser.parse_known_args(options) + if unknown_args: + support.error("you cannot specify any option except -o with --continue option! " + "Please use '--restart-from last' if you need to change some " + "of the options from the initial run and continue from the last available checkpoint.", log) + if args.meta: + if args.careful or args.mismatch_corrector or (args.cov_cutoff != "off" and args.cov_cutoff is not None): + support.error("you cannot specify --careful, --mismatch-correction or --cov-cutoff in metagenomic mode!", + log) + if args.rna: + if args.careful: + support.error("you cannot specify --careful in RNA-Seq mode!", log) + + modes_count = [args.meta, args.large_genome, args.truseq_mode, args.rna, args.plasmid, args.single_cell, args.isolate].count(True) + if modes_count > 1 and [args.meta, args.plasmid].count(True) < 2: + support.error("you cannot simultaneously use more than one mode out of " + "Isolate, Metagenomic, Large genome, Illumina TruSeq, RNA-Seq, Plasmid, and Single-cell (except combining Metagenomic and Plasmid)!", log) + elif modes_count == 0: + support.warning("No assembly mode was specified! 
If you intend to assemble high-coverage multi-cell/isolate data, use '--isolate' option.") + + if args.continue_mode: + return None + + existing_dataset_data = None + processed_dataset_fpath = os.path.join(args.output_dir, "input_dataset.yaml") + if load_processed_dataset: + if os.path.isfile(processed_dataset_fpath): + try: + existing_dataset_data = pyyaml.load(open(processed_dataset_fpath)) + except pyyaml.YAMLError: + existing_dataset_data = None + if existing_dataset_data is not None: + dataset_data = existing_dataset_data + else: + if args.dataset_yaml_filename: + try: + dataset_data = pyyaml.load(open(args.dataset_yaml_filename)) + except pyyaml.YAMLError: + _, exc, _ = sys.exc_info() + support.error( + "exception caught while parsing YAML file (%s):\n" % args.dataset_yaml_filename + str(exc)) + dataset_data = support.relative2abs_paths(dataset_data, + os.path.dirname(args.dataset_yaml_filename)) + else: + dataset_data = support.correct_dataset(dataset_data) + dataset_data = support.relative2abs_paths(dataset_data, os.getcwd()) + args.dataset_yaml_filename = processed_dataset_fpath + + support.check_dataset_reads(dataset_data, (args.only_assembler or args.rna), args.iontorrent, log) + if not support.get_lib_ids_by_type(dataset_data, options_storage.READS_TYPES_USED_IN_CONSTRUCTION): + support.error("you should specify at least one unpaired, paired-end, or high-quality mate-pairs library!") + if args.rna: + if len(dataset_data) != len( + support.get_lib_ids_by_type(dataset_data, options_storage.READS_TYPES_USED_IN_RNA_SEQ)): + support.error("you cannot specify any data types except " + + ", ".join(options_storage.READS_TYPES_USED_IN_RNA_SEQ) + " in RNA-Seq mode!") + # if len(support.get_lib_ids_by_type(dataset_data, 'paired-end')) > 1: + # support.error('you cannot specify more than one paired-end library in RNA-Seq mode!') + if args.meta and not args.only_error_correction: + if len(support.get_lib_ids_by_type(dataset_data, "paired-end")) != 1 or \ + len(dataset_data) - min(1, len( + support.get_lib_ids_by_type(dataset_data, ["tslr", "pacbio", "nanopore"]))) > 1: + support.error("you cannot specify any data types except a single paired-end library " + "(optionally accompanied by a single library of " + "TSLR-contigs, or PacBio reads, or Nanopore reads) in metaSPAdes mode!") + + if existing_dataset_data is None: + with open(args.dataset_yaml_filename, 'w') as f: + pyyaml.dump(dataset_data, f, + default_flow_style=False, default_style='"', width=float("inf")) + + set_default_values() + return dataset_data + + +def parse_args(log, bin_home, spades_home, secondary_filling, restart_from=False, options=None): + cfg = dict() + parser = create_parser() + + if secondary_filling: + old_output_dir = options_storage.args.output_dir + old_stop_after = options_storage.args.stop_after + + skip_output_dir = secondary_filling + load_processed_dataset = secondary_filling + + options_storage.args, argv = parser.parse_known_args(options) + + if options_storage.args.restart_from is not None and not secondary_filling: + for arg in options_storage.args.__dict__: + parser.set_defaults(**{arg: None}) + options_storage.args, argv = parser.parse_known_args(options) + + if argv: + msg = "Please specify option (e.g. 
-1, -2, -s, etc.) for the following paths: %s" + parser.error(msg % ", ".join(argv)) + + if secondary_filling: + options_storage.args.output_dir = old_output_dir + options_storage.args.stop_after = old_stop_after + + add_to_option(options_storage.args, log, skip_output_dir) + + if "dataset_data" in options_storage.args: + dataset_data = options_storage.args.dataset_data + else: + dataset_data = init_dataset_data() + dataset_data = postprocessing(options_storage.args, cfg, dataset_data, log, spades_home, + load_processed_dataset, restart_from, options) + + if options_storage.args.continue_mode: + return options_storage.args, None, None + + add_to_cfg(cfg, log, bin_home, spades_home, options_storage.args) + return options_storage.args, cfg, dataset_data + + +def usage(spades_version, show_hidden=False, mode=None): + parser = create_parser() + parser.print_help() + + +def set_default_values(): + if options_storage.args.threads is None: + options_storage.args.threads = options_storage.THREADS + if options_storage.args.memory is None: + if support.get_available_memory(): + options_storage.args.memory = int(min(options_storage.MEMORY, support.get_available_memory())) + else: + options_storage.args.memory = options_storage.MEMORY + if options_storage.args.disable_gzip_output is None: + options_storage.args.disable_gzip_output = False + if options_storage.args.disable_rr is None: + options_storage.args.disable_rr = False + if options_storage.args.careful is None: + options_storage.args.careful = False + if options_storage.args.mismatch_corrector is None: + options_storage.args.mismatch_corrector = False + if options_storage.args.checkpoints is None: + options_storage.args.checkpoints = "none" + if options_storage.args.developer_mode is None: + options_storage.args.developer_mode = False + if options_storage.args.qvoffset == "auto": + options_storage.args.qvoffset = None + if options_storage.args.cov_cutoff is None: + options_storage.args.cov_cutoff = "off" + if options_storage.args.tmp_dir is None: + options_storage.args.tmp_dir = os.path.join(options_storage.args.output_dir, options_storage.TMP_DIR) + if options_storage.args.large_genome is None: + options_storage.args.large_genome = False + if options_storage.args.truseq_mode is None: + options_storage.args.truseq_mode = False + if options_storage.args.save_gp is None: + options_storage.args.save_gp = False + if options_storage.args.only_assembler is None: + options_storage.args.only_assembler = False + if options_storage.args.only_error_correction is None: + options_storage.args.only_error_correction = False + + +def save_restart_options(): + options_storage.restart = argparse.Namespace(**vars(options_storage.args)) + options_storage.restart.continue_mode = None + options_storage.restart.restart_from = None + options_storage.restart.output_dir = None + + +def load_restart_options(): + if "k_mers" in options_storage.restart and options_storage.restart.k_mers: + options_storage.original_k_mers = options_storage.args.k_mers + if options_storage.restart.k_mers == "auto": + options_storage.args.k_mers = None # set by default + else: + options_storage.args.k_mers = options_storage.restart.k_mers + options_storage.restart.k_mers = None + + for option in options_storage.restart.__dict__: + if options_storage.restart.__dict__[option] is not None: + options_storage.args.__dict__[option] = options_storage.restart.__dict__[option] + + +def enable_truseq_mode(): + options_storage.K_MERS_SHORT = [21, 33, 45, 55] + options_storage.K_MERS_150 = [21, 33, 45, 
55, 77] + options_storage.K_MERS_250 = [21, 33, 45, 55, 77, 99, 127] + options_storage.args.truseq_mode = True + options_storage.correct_scaffolds = True + options_storage.run_truseq_postprocessing = True + options_storage.args.only_assembler = True + + +def will_rerun(options): + for opt, arg in options: + if opt == "--continue" or opt.startswith( + "--restart-from"): # checks both --restart-from k33 and --restart-from=k33 + return True + return False + + +def is_first_run(): + continue_parser = argparse.ArgumentParser(add_help=False) + continue_parser.add_argument("--continue", dest="continue_mode", action="store_true") + continue_parser.add_argument("--restart-from", dest="restart_from", default=None, type=restart_from, action="store") + nargs, unknown_args = continue_parser.parse_known_args() + return not (nargs.continue_mode or nargs.restart_from is not None) + + +def get_output_dir_from_args(): + output_parser = argparse.ArgumentParser(add_help=False) + output_parser.add_argument("-o", type=str, dest="output_dir", action=StoreUniqueAction) + nargs, unknown_args = output_parser.parse_known_args() + if nargs.output_dir is None: + return None + return abspath(expanduser(nargs.output_dir)) diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/options_storage.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/options_storage.py new file mode 100644 index 0000000..0110d51 --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/options_storage.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python + +############################################################################ +# Copyright (c) 2015-2019 Saint Petersburg State University +# Copyright (c) 2011-2014 Saint Petersburg Academic University +# All Rights Reserved +# See file LICENSE for details. 
+############################################################################ + +import os + +# for restarting SPAdes +original_k_mers = None + +dict_of_prefixes = dict() +dict_of_rel2abs = dict() + +correct_scaffolds = False +run_truseq_postprocessing = False + +SUPPORTED_PYTHON_VERSIONS = ["2.7", "3.2+"] # major.minor format only, closed ("-") and open ("+") ranges allowed +# allowed reads extensions for BayesHammer and for the whole SPAdes pipeline +BH_ALLOWED_READS_EXTENSIONS = [".fq", ".fastq", ".bam", ".fq.gz", ".fastq.gz"] +IONTORRENT_ONLY_ALLOWED_READS_EXTENSIONS = [".bam"] +CONTIGS_ALLOWED_READS_EXTENSIONS = [".fa", ".fasta", ".fa.gz", ".fasta.gz"] +ALLOWED_READS_EXTENSIONS = BH_ALLOWED_READS_EXTENSIONS + CONTIGS_ALLOWED_READS_EXTENSIONS + +# we support up to MAX_LIBS_NUMBER libs for each type of short-read libs +MAX_LIBS_NUMBER = 9 +OLD_STYLE_READS_OPTIONS = ["--12", "-1", "-2", "-s", "--merged"] +SHORT_READS_TYPES = {"pe": "paired-end", "s": "single", "mp": "mate-pairs", "hqmp": "hq-mate-pairs", "nxmate": "nxmate"} +# other lib types: +LONG_READS_TYPES = ["pacbio", "sanger", "nanopore", "tslr", "trusted-contigs", "untrusted-contigs", "fl-rna"] + +SHORT_STAGES_NAME = ["ec", "as", "mc", "scc", "tpp"] + +# final contigs and scaffolds names +contigs_name = "contigs.fasta" +scaffolds_name = "scaffolds.fasta" +assembly_graph_name = "assembly_graph.fastg" +assembly_graph_name_gfa = "assembly_graph_with_scaffolds.gfa" +contigs_paths = "contigs.paths" +scaffolds_paths = "scaffolds.paths" +transcripts_name = "transcripts.fasta" +transcripts_paths = "transcripts.paths" +filtering_types = ["hard", "soft", "default"] +bgc_stats_name = "bgc_statistics.txt" +gene_clusters_name = "gene_clusters.fasta" +domain_graph_name = "domain_graph.dot" + +pipeline_state_dir = "pipeline_state" +biosyntheticspades_hmms = "biosynthetic_spades_hmms" + +# other constants +MIN_K = 1 +MAX_K = 127 +RNA_MIN_K = 29 +RNA_MAX_LOWER_K = 55 +THRESHOLD_FOR_BREAKING_SCAFFOLDS = 3 +THRESHOLD_FOR_BREAKING_ADDITIONAL_CONTIGS = 10 +GAP_CLOSER_ENABLE_MIN_K = 55 +SCC_K = 21 + +# default values constants +THREADS = 16 +MEMORY = 250 +K_MERS_RNA = [33, 49] +K_MERS_SHORT = [21, 33, 55] +K_MERS_150 = [21, 33, 55, 77] +K_MERS_250 = [21, 33, 55, 77, 99, 127] +K_MERS_PLASMID_100 = [21, 33, 55, 77] +K_MERS_PLASMID_LONG = [21, 33, 55, 77, 99, 127] + +ITERATIONS = 1 +TMP_DIR = "tmp" + +READS_TYPES_USED_IN_CONSTRUCTION = ["paired-end", "single", "hq-mate-pairs"] +READS_TYPES_USED_IN_RNA_SEQ = ["paired-end", "single", "trusted-contigs", "untrusted-contigs", "pacbio", "nanopore", "fl-rna"] + +BASE_STAGE = "read_conversion" +LAST_STAGE = "last" + +first_command_line = None +args = None + + +# get path to checkpoint stage file +def get_stage_filename(stage_num, stage_short_name): + stage_file_name = "stage_%d_%s" % (stage_num, stage_short_name) + stage_checkpoint_path = os.path.join(args.output_dir, pipeline_state_dir, stage_file_name) + return stage_checkpoint_path + + +# k-mers were left at default, not single-cell, not IonTorrent data, not RNA, and (temporarily) not meta (except metaplasmid) +def auto_K_allowed(): + return not args.k_mers and not args.single_cell and not args.iontorrent and not (args.meta and not args.plasmid) diff --git a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/process_cfg.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/process_cfg.py similarity index 92% rename from src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/process_cfg.py rename to src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/process_cfg.py 
index c8ab087..63357bd 100644 --- a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/process_cfg.py +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/process_cfg.py @@ -1,16 +1,16 @@ #!/usr/bin/env python ############################################################################ -# Copyright (c) 2015 Saint Petersburg State University +# Copyright (c) 2015-2019 Saint Petersburg State University # Copyright (c) 2011-2014 Saint Petersburg Academic University # All Rights Reserved # See file LICENSE for details. ############################################################################ -import sys import support + class cfg_placeholder: pass @@ -32,7 +32,7 @@ def skip_double_quotes(line): def check_property(prop_line): - if len(prop_line.split()) > 1: # property is set, i.e. has value + if len(prop_line.split()) > 1: # property is set, i.e. has value if prop_line.split()[1] != "N/A": return True return False @@ -88,14 +88,14 @@ def substitute_params(filename, var_dict, log): for var, value in var_dict.items(): if var not in vars_in_file: - support.error("Couldn't find " + var + " in " + filename, log) + support.error("Couldn't find %s in %s" % (var, filename), log) meta = vars_in_file[var] lines[meta.line_num] = meta.indent + str(var) + " " + str(value) + "\n" - file = open(filename, "w") - file.writelines(lines) - file.close() + f = open(filename, "w") + f.writelines(lines) + f.close() # configs with more priority should go first in parameters @@ -119,7 +119,7 @@ def load_value(value): elif value.isdigit(): return int(value) else: - return value #string as-is + return value # string as-is def load_value_list(value_list): if len(value_list) > 1: diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/run_contig_breaker.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/run_contig_breaker.py new file mode 100644 index 0000000..86660b9 --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/run_contig_breaker.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python + +############################################################################ +# Copyright (c) 2015 Saint Petersburg State University +# All Rights Reserved +# See file LICENSE for details. 
+############################################################################ + +import os +import sys + +pipeline_modules_home = 'src/spades_pipeline/' # os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(pipeline_modules_home, "common")) +sys.path.append(os.path.join(pipeline_modules_home, "truspades")) + +# import alignment +import sam_parser +import break_by_coverage +import SeqIO + + +def break_contigs(contigs_file, sam_file, output_file): + contigs = list(SeqIO.parse(open(contigs_file, "rU"), "fasta")) + # sam = sam_parser.SamChain([sam_parser.Samfile(sam_file) for sam_file in sam_files]) + sam = sam_parser.Samfile(sam_file) + # last two arguments: K and the minimal zero-coverage stretch length at which to break + coverage_breaker = break_by_coverage.ContigBreaker(contigs, sam, 100, 50) + coverage_breaker.OutputBroken(output_file) + # contigs = list(SeqIO.parse(open(contigs_file, "rU"), "fasta")) + # output = open(output_file, "w") + # for contig in contigs: + # for subcontig in coverage_breaker.Break(contig): + # SeqIO.write(subcontig, output, "fasta") + # output.close() + + +if __name__ == '__main__': + + if len(sys.argv) < 4: + sys.stderr.write("Usage: %s <contigs_file> <sam_file> <output_file>\n" % sys.argv[0]) + sys.exit(1) + + contigs_file = sys.argv[1] + sam_file = sys.argv[2] + output_file = sys.argv[3] + break_contigs(contigs_file, sam_file, output_file) diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/breaking_scaffolds_script.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/breaking_scaffolds_script.py new file mode 100644 index 0000000..08d1c99 --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/breaking_scaffolds_script.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python + +############################################################################ +# Copyright (c) 2015-2019 Saint Petersburg State University +# Copyright (c) 2011-2014 Saint Petersburg Academic University +# All Rights Reserved +# See file LICENSE for details. 
+############################################################################ + +import argparse +import os +import sys +from os.path import abspath, dirname, realpath, join + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--result_scaffolds_filename", + help="path to result scaffolds filename", + action="store") + parser.add_argument("--misc_dir", + help="path to misc dir", + action="store") + parser.add_argument("--threshold_for_breaking_scaffolds", + dest="THRESHOLD_FOR_BREAKING_SCAFFOLDS", + type=int, + help="threshold for breaking scaffolds", + action="store") + return parser.parse_args() + +def main(): + args = parse_args() + + # init python_lib_folder + python_modules_home = abspath(dirname(realpath(__file__))) + sys.path.append(join(python_modules_home, "..")) + import support + + if os.path.isfile(args.result_scaffolds_filename): + if not os.path.isdir(args.misc_dir): + os.makedirs(args.misc_dir) + + result_broken_scaffolds = os.path.join(args.misc_dir, "broken_scaffolds.fasta") + modified, broken_scaffolds = support.break_scaffolds(args.result_scaffolds_filename, + args.THRESHOLD_FOR_BREAKING_SCAFFOLDS) + if modified: + support.write_fasta(result_broken_scaffolds, broken_scaffolds) + + +if __name__ == "__main__": + main() diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/check_test_script.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/check_test_script.py new file mode 100644 index 0000000..eb179a0 --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/check_test_script.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python + +############################################################################ +# Copyright (c) 2015-2019 Saint Petersburg State University +# Copyright (c) 2011-2014 Saint Petersburg Academic University +# All Rights Reserved +# See file LICENSE for details. +############################################################################ + +import argparse +import logging +import os +import sys +from os.path import abspath, dirname, realpath, join + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--mode", + choices=["common", "truseq", "rna", "plasmid"], + help="running mode", + action="store") + parser.add_argument("--truseq_long_reads_file", + help="path to truseq long reads", + action="store") + parser.add_argument("--result_transcripts_filename", + help="path to file with result transcripts", + action="store") + parser.add_argument("--result_contigs_filename", + help="path to file with result contigs", + action="store") + parser.add_argument("--result_scaffolds_filename", + help="path to file with result scaffolds", + action="store") + return parser.parse_args() + + +def main(): + args = parse_args() + + # init python_lib_folder + python_modules_home = abspath(dirname(realpath(__file__))) + sys.path.append(join(python_modules_home, "..")) + import support + + # create logger + log = logging.getLogger("Check test") + log.setLevel(logging.DEBUG) + console = logging.StreamHandler(sys.stdout) + console.setFormatter(logging.Formatter("%(message)s")) + console.setLevel(logging.DEBUG) + log.addHandler(console) + + if args.mode == "truseq": + if not os.path.isfile(args.truseq_long_reads_file): + support.error("TEST FAILED: %s does not exist!" % args.truseq_long_reads_file) + elif args.mode == "rna": + if not os.path.isfile(args.result_transcripts_filename): + support.error("TEST FAILED: %s does not exist!" 
% args.result_transcripts_filename) + else: + for result_filename in [args.result_contigs_filename, args.result_scaffolds_filename]: + if os.path.isfile(result_filename): + result_fasta = list(support.read_fasta(result_filename)) + # correctness check: should be one contig of length 1000 bp + correct_number = 1 + if args.mode == "plasmid" or args.mode == "metaplasmid": + correct_length = 9689 + else: + correct_length = 1000 + if not len(result_fasta): + support.error("TEST FAILED: %s does not contain contigs!" % result_filename) + elif len(result_fasta) > correct_number: + support.error("TEST FAILED: %s contains more than %d contig (%d)!" % + (result_filename, correct_number, len(result_fasta))) + elif len(result_fasta[0][1]) != correct_length: + if len(result_fasta[0][1]) > correct_length: + relation = "more" + else: + relation = "less" + support.error("TEST FAILED: %s contains %s than %d bp (%d bp)!" % + (result_filename, relation, correct_length, len(result_fasta[0][1]))) + else: + support.error("TEST FAILED: %s does not exist!" % result_filename) + log.info("\n========= TEST PASSED CORRECTLY.") + + +if __name__ == "__main__": + main() diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/compress_all.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/compress_all.py new file mode 100644 index 0000000..f532627 --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/compress_all.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python + +############################################################################ +# Copyright (c) 2015-2019 Saint Petersburg State University +# Copyright (c) 2011-2014 Saint Petersburg Academic University +# All Rights Reserved +# See file LICENSE for details. +############################################################################ + +import argparse +import glob +import logging +import os +import sys +from site import addsitedir +from os.path import abspath, dirname, realpath, join, isfile + +python_modules_home = abspath(dirname(realpath(__file__))) +sys.path.append(join(python_modules_home, "..")) +import support + + +def remove_not_corrected_reads(output_dir): + for not_corrected in glob.glob(os.path.join(output_dir, "*.bad.fastq")): + os.remove(not_corrected) + + +def compress_dataset_files(input_file, ext_python_modules_home, max_threads, log, not_used_yaml_file, output_dir, + gzip_output): + addsitedir(ext_python_modules_home) + if sys.version.startswith("2."): + import pyyaml2 as pyyaml + from joblib2 import Parallel, delayed + elif sys.version.startswith("3."): + import pyyaml3 as pyyaml + from joblib3 import Parallel, delayed + + dataset_data = pyyaml.load(open(input_file)) + remove_not_corrected_reads(output_dir) + is_changed = False + if gzip_output: + is_changed = True + pigz_path = support.which("pigz") + if pigz_path: + compressor = "pigz" + else: + compressor = "gzip" + log.info("\n== Compressing corrected reads (with %s)" % compressor) + to_compress = [] + for reads_library in dataset_data: + for key, value in reads_library.items(): + if key.endswith("reads"): + compressed_reads_filenames = [] + for reads_file in value: + compressed_reads_filenames.append(reads_file + ".gz") + to_compress.append(reads_file) + reads_library[key] = compressed_reads_filenames + + if len(to_compress): + for reads_file in to_compress: + if not isfile(reads_file): + support.error( + "something went wrong and file with corrected reads (%s) is missing!" 
% reads_file, log) + + if pigz_path: + for reads_file in to_compress: + support.sys_call([pigz_path, "-f", "-7", "-p", str(max_threads), reads_file], log) + else: + n_jobs = min(len(to_compress), max_threads) + outputs = Parallel(n_jobs=n_jobs)( + delayed(support.sys_call)(["gzip", "-f", "-7", reads_file]) for reads_file in to_compress) + for output in outputs: + if output: + log.info(output) + + if not_used_yaml_file != "": + is_changed = True + not_used_dataset_data = pyyaml.load(open(not_used_yaml_file)) + dataset_data += not_used_dataset_data + if is_changed: + with open(input_file, 'w') as f: + pyyaml.dump(dataset_data, f, + default_flow_style=False, default_style='"', width=float("inf")) + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--input_file", + help="path to input file", + action="store") + parser.add_argument("--ext_python_modules_home", + help="path to ext python modules home", + action="store") + parser.add_argument("--max_threads", + type=int, + help="max threads", + action="store") + parser.add_argument("--output_dir", + help="path to output dir", + action="store") + parser.add_argument("--gzip_output", + help="flag to enable gzipping", + action="store_true") + parser.add_argument("--not_used_yaml_file", + default="", + help="path to YAML file with data not used during error correction", + action="store") + return parser.parse_args() + + +def main(): + args = parse_args() + + log = logging.getLogger("compressing") + log.setLevel(logging.DEBUG) + console = logging.StreamHandler(sys.stdout) + console.setFormatter(logging.Formatter("%(message)s")) + console.setLevel(logging.DEBUG) + log.addHandler(console) + + compress_dataset_files(args.input_file, args.ext_python_modules_home, args.max_threads, log, + args.not_used_yaml_file, args.output_dir, args.gzip_output) + + +if __name__ == "__main__": + main() diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/copy_files.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/copy_files.py new file mode 100644 index 0000000..c4a4cab --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/copy_files.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python + +############################################################################ +# Copyright (c) 2019 Saint Petersburg State University +# All Rights Reserved +# See file LICENSE for details. 
+############################################################################ + +import logging +import os +import shutil +import sys + + +def main(args): + log = logging.getLogger("copy files") + log.setLevel(logging.DEBUG) + console = logging.StreamHandler(sys.stdout) + console.setFormatter(logging.Formatter("%(message)s")) + console.setLevel(logging.DEBUG) + log.addHandler(console) + + for inputfilename, outputfilename in zip(args[1::2], args[2::2]): + if os.path.isfile(inputfilename): + shutil.copyfile(inputfilename, outputfilename) + + +if __name__ == "__main__": + main(sys.argv) diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/correction_iteration_script.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/correction_iteration_script.py new file mode 100644 index 0000000..e44bf32 --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/correction_iteration_script.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python + +############################################################################ +# Copyright (c) 2015-2019 Saint Petersburg State University +# Copyright (c) 2011-2014 Saint Petersburg Academic University +# All Rights Reserved +# See file LICENSE for details. +############################################################################ + +import argparse +import logging +import os +import shutil +import sys + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--corrected", + help="path to file with corrected contigs/scaffolds", + action="store") + parser.add_argument("--assembled", + help="path to file with assembled contigs/scaffolds", + action="store") + parser.add_argument("--assembly_type", + choices=["contigs", "scaffolds"], + help="assembly type: contigs/scaffolds", + action="store") + parser.add_argument("--output_dir", + help="path to output dir", + action="store") + parser.add_argument("--bin_home", + help="path to bin home", + action="store") + return parser.parse_args() + + +def main(): + args = parse_args() + + # create logger + log = logging.getLogger("Mismatch correction " + args.assembly_type) + log.setLevel(logging.DEBUG) + console = logging.StreamHandler(sys.stdout) + console.setFormatter(logging.Formatter("%(message)s")) + console.setLevel(logging.DEBUG) + log.addHandler(console) + + # moving assembled contigs (scaffolds) to misc dir + if os.path.isfile(args.corrected): + shutil.move(args.corrected, args.assembled) + + # TODO: could check only here that the assembled file exists and maybe skip... 
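+ # The block below runs one mismatch-correction pass: spades-corrector-core reads the + # pre-generated corrector.info config under output_dir/mismatch_corrector/{assembly_type}/configs + # and polishes the assembled FASTA; on success, the corrected output overwrites args.corrected.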
+ if not os.path.isfile(args.assembled) or os.path.getsize(args.assembled) == 0: + log.info("\n== Skipping processing of %s (empty file)\n" % args.assembly_type) + else: + log.info("\n== Processing of %s\n" % args.assembly_type) + tmp_dir_for_corrector = os.path.join(args.output_dir, "mismatch_corrector", args.assembly_type) + + # correcting + result_corrected_filename = os.path.join(tmp_dir_for_corrector, "corrected_contigs.fasta") + + dst_configs = os.path.join(tmp_dir_for_corrector, "configs") + cfg_file_name = os.path.join(dst_configs, "corrector.info") + + binary_name = "spades-corrector-core" + command = [os.path.join(args.bin_home, binary_name), + os.path.abspath(cfg_file_name), os.path.abspath(args.assembled)] + + log.info("\n== Running contig polishing tool: " + ' '.join(command) + "\n") + log.info("\n== Dataset description file was created: " + cfg_file_name + "\n") + log.info("Run: " + ' '.join(command)) + os.system(' '.join(command)) + + if not os.path.isfile(result_corrected_filename): + log.error("mismatch correction finished abnormally: %s not found!" % result_corrected_filename) + + if os.path.isfile(result_corrected_filename): + shutil.copyfile(result_corrected_filename, args.corrected) + + +if __name__ == "__main__": + main() diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/plasmid_glue.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/plasmid_glue.py new file mode 100644 index 0000000..f01ee52 --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/plasmid_glue.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python + +import os +import sys + + +def main(args): + outdir = args[1] + res = outdir + "/final_contigs.fasta" + res_f = open(res, "w") + for file in os.listdir(outdir): + farr = file.split('.') + if farr[-1] != "fasta": + continue + if farr[-2] != "circular": + continue + arr = farr[-3].split("_") + if len(arr) < 2: + continue + cov = arr[-1] + + # for line in open(os.path.join(dir,file), "r"): + for line in open(os.path.join(outdir,file), "r"): + line = line.strip() + if len(line) > 0 and line[0] == ">": + line += "_cutoff_" + cov + res_f.write(line+ "\n") + res_f.close() + scaff = outdir + "/scaffolds.fasta" + from shutil import copyfile + copyfile(res, scaff) +# log.info("====metaplasmid circular contigs can be found here: " + final_res) + +if __name__ == "__main__": + main(sys.argv) + diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/postprocessing_script.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/postprocessing_script.py new file mode 100644 index 0000000..96077c8 --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/postprocessing_script.py @@ -0,0 +1,92 @@ +#!/usr/bin/env python + +############################################################################ +# Copyright (c) 2015-2019 Saint Petersburg State University +# Copyright (c) 2011-2014 Saint Petersburg Academic University +# All Rights Reserved +# See file LICENSE for details. 
+############################################################################ + +import argparse +import logging +import os +import shutil +import sys +from os.path import abspath, dirname, realpath, join, isfile +from site import addsitedir + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--result_scaffolds_filename", + help="path to file with result scaffolds", + action="store") + parser.add_argument("--assembled_scaffolds_filename", + help="path to file with assembled scaffolds", + action="store") + parser.add_argument("--bin_home", + help="path to bin home", + action="store") + parser.add_argument("--ext_python_modules_home", + help="path to ext python modules home", + action="store") + parser.add_argument("--output_dir", + help="path to output dir", + action="store") + parser.add_argument("--truseq_long_reads_file_base", + help="path to file with truseq long reads", + action="store") + parser.add_argument("--dataset_yaml_file", + help="path to yaml file with dataset", + action="store") + parser.add_argument("--threads", + type=int, + help="number of threads", + action="store") + return parser.parse_args() + + +def main(): + args = parse_args() + + # create logger + log = logging.getLogger("Postprocessing") + log.setLevel(logging.DEBUG) + console = logging.StreamHandler(sys.stdout) + console.setFormatter(logging.Formatter("%(message)s")) + console.setLevel(logging.DEBUG) + log.addHandler(console) + + addsitedir(args.ext_python_modules_home) + # load dataset from yaml + if sys.version.startswith("2."): + import pyyaml2 as pyyaml + elif sys.version.startswith("3."): + import pyyaml3 as pyyaml + + dataset_data = pyyaml.load(open(args.dataset_yaml_file)) + + # init python_lib_folder + python_modules_home = abspath(dirname(realpath(__file__))) + source_dirs = ["..", "../truspades", "../common", "../executors"] + for dir_name in source_dirs: + sys.path.append(join(python_modules_home, dir_name)) + + # import alignment and moleculo_postprocessing + import alignment + import moleculo_postprocessing + # run command + if isfile(args.result_scaffolds_filename): + shutil.move(args.result_scaffolds_filename, args.assembled_scaffolds_filename) + alignment_bin = os.path.join(args.bin_home, "spades-bwa") + alignment_dir = os.path.join(args.output_dir, "alignment") + sam_files = alignment.align_bwa(alignment_bin, args.assembled_scaffolds_filename, + dataset_data, alignment_dir, log, args.threads) + + moleculo_postprocessing.moleculo_postprocessing(args.assembled_scaffolds_filename, + args.truseq_long_reads_file_base, + sam_files, log) + + +if __name__ == "__main__": + main() diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/preprocess_contigs.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/preprocess_contigs.py new file mode 100644 index 0000000..3ac51fa --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/preprocess_contigs.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python + +############################################################################ +# Copyright (c) 2019 Saint Petersburg State University +# All Rights Reserved +# See file LICENSE for details. 
+############################################################################ + +import argparse +import logging +import sys +from os.path import abspath, dirname, realpath, join + +# init python_lib_folder +python_modules_home = abspath(dirname(realpath(__file__))) +sys.path.append(join(python_modules_home, "..")) +import support + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--args_filename", + help="path to file with args", + action="store") + parser.add_argument("--dst", + help="path to dst dir", + action="store") + parser.add_argument("--threshold_for_breaking_additional_contigs", + dest="THRESHOLD_FOR_BREAKING_ADDITIONAL_CONTIGS", + type=int, + help="threshold for breaking additional contigs", + action="store") + return parser.parse_args() + + +def main(): + args = parse_args() + + # create logger + log = logging.getLogger("Preprocess additional contigs") + log.setLevel(logging.DEBUG) + console = logging.StreamHandler(sys.stdout) + console.setFormatter(logging.Formatter("%(message)s")) + console.setLevel(logging.DEBUG) + log.addHandler(console) + + with open(args.args_filename) as f: + lines = f.readlines() + for gzipped, old_filename, new_filename in zip(lines[0::3], lines[1::3], lines[2::3]): + gzipped = (gzipped.rstrip() == "True") + old_filename = old_filename.rstrip() + new_filename = new_filename.rstrip() + + modified, new_fasta = support.break_scaffolds(old_filename, + args.THRESHOLD_FOR_BREAKING_ADDITIONAL_CONTIGS, + replace_char='A', gzipped=gzipped) + log.info("== Processing additional contigs (%s): changing Ns to As and " + "splitting by continuous (>= %d) Ns fragments (results are in %s directory)" % ( + old_filename, + args.THRESHOLD_FOR_BREAKING_ADDITIONAL_CONTIGS, + args.dst)) + support.write_fasta(new_filename, new_fasta) + + +if __name__ == "__main__": + main() diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/preprocess_interlaced_reads.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/preprocess_interlaced_reads.py new file mode 100644 index 0000000..a7cefa7 --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/preprocess_interlaced_reads.py @@ -0,0 +1,115 @@ +#!/usr/bin/env python + +############################################################################ +# Copyright (c) 2015-2019 Saint Petersburg State University +# Copyright (c) 2011-2014 Saint Petersburg Academic University +# All Rights Reserved +# See file LICENSE for details. 
+############################################################################ + + +import argparse +import logging +import sys +import gzip +from os.path import abspath, dirname, realpath, join + +# init python_lib_folder +python_modules_home = abspath(dirname(realpath(__file__))) +sys.path.append(join(python_modules_home, "..")) +import support + + +def write_single_read(in_file, out_file, read_name=None, is_fastq=False, is_python3=False): + if read_name is None: + read_name = support.process_readline(in_file.readline(), is_python3) + if not read_name: + return "" # no next read + read_value = support.process_readline(in_file.readline(), is_python3) + line = support.process_readline(in_file.readline(), is_python3) + fpos = in_file.tell() + while (is_fastq and not line.startswith('+')) or (not is_fastq and not line.startswith('>')): + read_value += line + line = support.process_readline(in_file.readline(), is_python3) + if not line: + if fpos == in_file.tell(): + break + fpos = in_file.tell() + out_file.write(read_name + '\n') + out_file.write(read_value + '\n') + + if is_fastq: + read_quality = support.process_readline(in_file.readline(), is_python3) + line = support.process_readline(in_file.readline(), is_python3) + while not line.startswith('@'): + read_quality += line + line = support.process_readline(in_file.readline(), is_python3) + if not line: + if fpos == in_file.tell(): + break + fpos = in_file.tell() + if len(read_value) != len(read_quality): + support.error("The length of sequence and quality lines should be the same! " + "Check read %s (SEQ length is %d, QUAL length is %d)" % + (read_name, len(read_value), len(read_quality))) + out_file.write("+\n") + out_file.write(read_quality + '\n') + return line # next read name or empty string + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--args_filename", + help="path to file with args", + action="store") + parser.add_argument("--dst", + help="path to dst dir", + action="store") + return parser.parse_args() + + +def main(): + args = parse_args() + + # create logger + log = logging.getLogger("Preprocess interlaced reads") + log.setLevel(logging.DEBUG) + console = logging.StreamHandler(sys.stdout) + console.setFormatter(logging.Formatter("%(message)s")) + console.setLevel(logging.DEBUG) + log.addHandler(console) + + with open(args.args_filename) as f: + lines = f.readlines() + lines = [x.rstrip() for x in lines] + for input_filename, out_left_filename, out_right_filename, was_compressed, is_fastq in \ + zip(lines[0::5], lines[1::5], lines[2::5], lines[3::5], lines[4::5]): + was_compressed = (was_compressed == "True") + is_fastq = (is_fastq == "True") + + if was_compressed: + input_file = gzip.open(input_filename, 'r') + else: + input_file = open(input_filename) + + log.info("== Splitting %s into left and right reads (in %s directory)" % (input_filename, args.dst)) + out_files = [open(out_left_filename, 'w'), open(out_right_filename, 'w')] + i = 0 + next_read_name = write_single_read(input_file, out_files[i], None, is_fastq, + sys.version.startswith("3.") and was_compressed) + while next_read_name: + i = (i + 1) % 2 + next_read_name = write_single_read(input_file, out_files[i], next_read_name, is_fastq, + sys.version.startswith("3.") and was_compressed) + if i == 0: + support.error( + "the number of reads in file with interlaced reads (%s) should be EVEN!" 
% (input_filename), + log) + out_files[0].close() + out_files[1].close() + + input_file.close() + + +if __name__ == "__main__": + main() diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/preprocess_nxmate_reads.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/preprocess_nxmate_reads.py new file mode 100644 index 0000000..d5bda54 --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/scripts/preprocess_nxmate_reads.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python + +############################################################################ +# Copyright (c) 2019 Saint Petersburg State University +# All Rights Reserved +# See file LICENSE for details. +############################################################################ + +import argparse +import logging +import sys +from os.path import abspath, dirname, realpath, join, isfile + +# init python_lib_folder +python_modules_home = abspath(dirname(realpath(__file__))) +sys.path.append(join(python_modules_home, "..")) + +if isfile(join(python_modules_home, "../../../spades_init.py")): + sys.path.append(join(python_modules_home, "../../..")) +else: + sys.path.append(join(python_modules_home, "../../../../bin/")) + +import lucigen_nxmate, support + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--args_filename", + help="path to file with args", + action="store") + parser.add_argument("--dst", + help="path to dst dir", + action="store") + parser.add_argument("--threads", + help="number of threads", + default=16, + type=int, + action="store") + return parser.parse_args() + +def main(): + args = parse_args() + + # create logger + log = logging.getLogger("Preprocess Lucigen NxMate reads") + log.setLevel(logging.DEBUG) + console = logging.StreamHandler(sys.stdout) + console.setFormatter(logging.Formatter("%(message)s")) + console.setLevel(logging.DEBUG) + log.addHandler(console) + + try: + with open(args.args_filename) as f: + lines = f.readlines() + for infile1, infile2 in zip(lines[0::2], lines[1::2]): + lucigen_nxmate.process_reads(infile1, infile2, args.dst, log, args.threads) + except ImportError: + support.error("can't process Lucigen NxMate reads! lucigen_nxmate.py is missing!", log) + + +if __name__ == "__main__": + main() diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/__init__.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/breaking_scaffolds_stage.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/breaking_scaffolds_stage.py new file mode 100644 index 0000000..eddc2d4 --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/breaking_scaffolds_stage.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python + +############################################################################ +# Copyright (c) 2015-2019 Saint Petersburg State University +# Copyright (c) 2011-2014 Saint Petersburg Academic University +# All Rights Reserved +# See file LICENSE for details. 
+############################################################################ + +import os +import shutil +import sys + +import options_storage +from stages import stage +import commands_parser + + +class BreakingScaffoldsStage(stage.Stage): + STAGE_NAME = "Breaking scaffolds" + + def get_command(self, cfg): + args = [os.path.join(self.python_modules_home, "spades_pipeline", "scripts", "breaking_scaffolds_script.py"), + "--result_scaffolds_filename", self.output_files["result_scaffolds_filename"], + "--misc_dir", self.output_files["misc_dir"], + "--threshold_for_breaking_scaffolds", str(options_storage.THRESHOLD_FOR_BREAKING_SCAFFOLDS)] + + return [commands_parser.Command(STAGE=self.STAGE_NAME, + path=sys.executable, + args=args, + short_name=self.short_name)] + + +def add_to_pipeline(pipeline, cfg, output_files, tmp_configs_dir, dataset_data, log, + bin_home, ext_python_modules_home, python_modules_home): + pipeline.add(BreakingScaffoldsStage("bs", output_files, + tmp_configs_dir, dataset_data, log, bin_home, + ext_python_modules_home, python_modules_home)) diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/check_test_stage.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/check_test_stage.py new file mode 100644 index 0000000..89e1856 --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/check_test_stage.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python + +############################################################################ +# Copyright (c) 2015-2019 Saint Petersburg State University +# Copyright (c) 2011-2014 Saint Petersburg Academic University +# All Rights Reserved +# See file LICENSE for details. +############################################################################ + +import os +import sys + +import options_storage +from stages import stage +import commands_parser + + +class CheckStageStage(stage.Stage): + STAGE_NAME = "Check test" + + def get_command(self, cfg): + args = [os.path.join(self.python_modules_home, "spades_pipeline", "scripts", "check_test_script.py")] + if options_storage.args.truseq_mode: + args += ["--mode", "truseq", "--truseq_long_reads_file", self.output_files["truseq_long_reads_file"]] + elif options_storage.args.rna: + args += ["--mode", "rna", "--result_transcripts_filename", self.output_files["result_transcripts_filename"]] + else: + if options_storage.args.plasmid: + args += ["--mode", "plasmid"] + else: + args += ["--mode", "common"] + + args += ["--result_contigs_filename", self.output_files["result_contigs_filename"], + "--result_scaffolds_filename", self.output_files["result_scaffolds_filename"]] + + return [commands_parser.Command(STAGE=self.STAGE_NAME, + path=sys.executable, + args=args, + short_name=self.short_name)] + + +def add_to_pipeline(pipeline, cfg, output_files, tmp_configs_dir, dataset_data, log, bin_home, + ext_python_modules_home, python_modules_home): + if options_storage.args.test_mode: + pipeline.add(CheckStageStage("check_test", output_files, tmp_configs_dir, + dataset_data, log, bin_home, ext_python_modules_home, python_modules_home)) diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/correction_stage.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/correction_stage.py new file mode 100644 index 0000000..eaf2313 --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/correction_stage.py @@ -0,0 +1,122 @@ +#!/usr/bin/env python + +############################################################################ 
+# Copyright (c) 2015-2019 Saint Petersburg State University +# Copyright (c) 2011-2014 Saint Petersburg Academic University +# All Rights Reserved +# See file LICENSE for details. +############################################################################ + +import os +import shutil +import sys +from distutils import dir_util +from site import addsitedir + +from stages import stage +import support +from process_cfg import merge_configs +import commands_parser +import options_storage + +def prepare_config_corr(filename, cfg, ext_python_modules_home): + addsitedir(ext_python_modules_home) + if sys.version.startswith("2."): + import pyyaml2 as pyyaml + elif sys.version.startswith("3."): + import pyyaml3 as pyyaml + data = pyyaml.load(open(filename)) + data["dataset"] = cfg.dataset + data["output_dir"] = cfg.output_dir + data["work_dir"] = cfg.tmp_dir + # data["hard_memory_limit"] = cfg.max_memory + data["max_nthreads"] = cfg.max_threads + data["bwa"] = cfg.bwa + with open(filename, 'w') as file_c: + pyyaml.dump(data, file_c, + default_flow_style=False, default_style='"', width=float("inf")) + + +class CorrectionIterationStage(stage.Stage): + def __init__(self, cfg, assembly_type, corrected, assembled, *args): + super(CorrectionIterationStage, self).__init__(*args) + self.assembly_type = assembly_type + self.corrected = corrected + self.assembled = assembled + self.STAGE_NAME = "Mismatch correction %s" % assembly_type + + self.tmp_dir_for_corrector = os.path.join(cfg["common"].output_dir, "mismatch_corrector", self.assembly_type) + cfg["mismatch_corrector"].__dict__["output_dir"] = self.tmp_dir_for_corrector + self.cfg = merge_configs(cfg["mismatch_corrector"], cfg["common"]) + + def get_command(self, cfg): + args = [os.path.join(self.python_modules_home, "spades_pipeline", "scripts", "correction_iteration_script.py"), + "--corrected", self.corrected, + "--assembled", self.assembled, + "--assembly_type", self.assembly_type, + "--output_dir", cfg["common"].output_dir, + "--bin_home", self.bin_home] + + return [commands_parser.Command(STAGE=self.STAGE_NAME, + path=sys.executable, + args=args, + config_dir=os.path.relpath(self.cfg.output_dir, options_storage.args.output_dir), + short_name=self.short_name, + del_after=[os.path.join(self.cfg.output_dir, "tmp"), + self.cfg.tmp_dir])] + + def generate_config(self, cfg): + dst_configs = os.path.join(self.cfg.output_dir, "configs") + if os.path.isdir(dst_configs): + shutil.rmtree(dst_configs) + dir_util.copy_tree(os.path.join(self.tmp_configs_dir, "corrector"), dst_configs, preserve_times=False) + cfg_file_name = os.path.join(dst_configs, "corrector.info") + + self.cfg.tmp_dir = support.get_tmp_dir(prefix="corrector_") + prepare_config_corr(cfg_file_name, self.cfg, self.ext_python_modules_home) + + +class CorrectionStage(stage.Stage): + stages = [] + STAGE_NAME = "Mismatch correction" + + def __init__(self, cfg, *args): + super(CorrectionStage, self).__init__(*args) + + cfg["mismatch_corrector"].__dict__["dataset"] = cfg["dataset"].yaml_filename + + to_correct = dict() + to_correct["contigs"] = \ + (self.output_files["result_contigs_filename"], self.output_files["assembled_contigs_filename"]) + to_correct["scaffolds"] = \ + (self.output_files["result_scaffolds_filename"], self.output_files["assembled_scaffolds_filename"]) + + for assembly_type, (corrected, assembled) in to_correct.items(): + self.stages.append(CorrectionIterationStage(cfg, assembly_type, corrected, assembled, + "mc_%s" % assembly_type, + self.output_files, + self.tmp_configs_dir, 
self.dataset_data, self.log, + self.bin_home, self.ext_python_modules_home, + self.python_modules_home)) + + def generate_config(self, cfg): + for stage in self.stages: + stage.generate_config(cfg) + + def get_command(self, cfg): + return [commands_parser.Command(STAGE=self.STAGE_NAME, + path="true", + args=[], + short_name=self.short_name + "_start")] + \ + [x for stage in self.stages for x in stage.get_command(cfg)] + \ + [commands_parser.Command(STAGE=self.STAGE_NAME, + path="true", + args=[], + short_name=self.short_name + "_finish")] + + +def add_to_pipeline(pipeline, cfg, output_files, tmp_configs_dir, dataset_data, log, bin_home, + ext_python_modules_home, python_modules_home): + if "assembly" in cfg and "mismatch_corrector" in cfg: + pipeline.add(CorrectionStage(cfg, "mc", output_files, tmp_configs_dir, dataset_data, + log, bin_home, ext_python_modules_home, python_modules_home)) diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/error_correction_stage.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/error_correction_stage.py new file mode 100644 index 0000000..b635a43 --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/error_correction_stage.py @@ -0,0 +1,192 @@ +#!/usr/bin/env python + +############################################################################ +# Copyright (c) 2015-2019 Saint Petersburg State University +# Copyright (c) 2011-2014 Saint Petersburg Academic University +# All Rights Reserved +# See file LICENSE for details. +############################################################################ + +import os +import shutil +import sys +from distutils import dir_util +from site import addsitedir + +import commands_parser +import options_storage +from stages import stage +import process_cfg +import support +from process_cfg import merge_configs + + +class ECRunningToolStage(stage.Stage): + def prepare_config_bh(self, filename, cfg, log): + subst_dict = dict() + subst_dict["dataset"] = process_cfg.process_spaces(cfg.dataset_yaml_filename) + subst_dict["input_working_dir"] = process_cfg.process_spaces(cfg.tmp_dir) + subst_dict["output_dir"] = process_cfg.process_spaces(cfg.output_dir) + subst_dict["general_max_iterations"] = options_storage.ITERATIONS + subst_dict["general_max_nthreads"] = cfg.max_threads + subst_dict["count_merge_nthreads"] = cfg.max_threads + subst_dict["bayes_nthreads"] = cfg.max_threads + subst_dict["expand_nthreads"] = cfg.max_threads + subst_dict["correct_nthreads"] = cfg.max_threads + subst_dict["general_hard_memory_limit"] = cfg.max_memory + if "qvoffset" in cfg.__dict__: + subst_dict["input_qvoffset"] = cfg.qvoffset + if "count_filter_singletons" in cfg.__dict__: + subst_dict["count_filter_singletons"] = cfg.count_filter_singletons + if "read_buffer_size" in cfg.__dict__: + subst_dict["count_split_buffer"] = cfg.read_buffer_size + process_cfg.substitute_params(filename, subst_dict, log) + + def prepare_config_ih(self, filename, cfg, ext_python_modules_home): + addsitedir(ext_python_modules_home) + if sys.version.startswith("2."): + import pyyaml2 as pyyaml + elif sys.version.startswith("3."): + import pyyaml3 as pyyaml + data = pyyaml.load(open(filename)) + data["dataset"] = cfg.dataset_yaml_filename + data["working_dir"] = cfg.tmp_dir + data["output_dir"] = cfg.output_dir + data["hard_memory_limit"] = cfg.max_memory + data["max_nthreads"] = cfg.max_threads + with open(filename, 'w') as f: + pyyaml.dump(data, f, + default_flow_style=False, default_style='"', 
width=float("inf")) + + def generate_config(self, cfg): + dst_configs = os.path.join(cfg.output_dir, "configs") + if os.path.isdir(dst_configs): + shutil.rmtree(dst_configs) + if cfg.iontorrent: + dir_util.copy_tree(os.path.join(self.tmp_configs_dir, "ionhammer"), dst_configs, preserve_times=False) + cfg_file_name = os.path.join(dst_configs, "ionhammer.cfg") + else: + dir_util.copy_tree(os.path.join(self.tmp_configs_dir, "hammer"), dst_configs, preserve_times=False) + cfg_file_name = os.path.join(dst_configs, "config.info") + + cfg.tmp_dir = support.get_tmp_dir(prefix="hammer_") + if cfg.iontorrent: + self.prepare_config_ih(cfg_file_name, cfg, self.ext_python_modules_home) + else: + self.prepare_config_bh(cfg_file_name, cfg, self.log) + + def get_command(self, cfg): + dst_configs = os.path.join(cfg.output_dir, "configs") + if cfg.iontorrent: + cfg_file_name = os.path.join(dst_configs, "ionhammer.cfg") + else: + cfg_file_name = os.path.join(dst_configs, "config.info") + + if cfg.iontorrent: + binary_name = "spades-ionhammer" + else: + binary_name = "spades-hammer" + + command = [commands_parser.Command(STAGE="Read error correction", + path=os.path.join(self.bin_home, binary_name), + args=[os.path.abspath(cfg_file_name)], + config_dir=os.path.relpath(cfg.output_dir, options_storage.args.output_dir), + short_name=self.short_name, + del_after=[cfg.tmp_dir], + output_files=[self.output_files["corrected_dataset_yaml_filename"]])] + return command + + +class ErrorCorrectionCompressingStage(stage.Stage): + def get_command(self, cfg): + args = [os.path.join(self.python_modules_home, "spades_pipeline", "scripts", "compress_all.py"), + "--input_file", self.output_files["corrected_dataset_yaml_filename"], + "--ext_python_modules_home", self.ext_python_modules_home, + "--max_threads", str(cfg.max_threads), + "--output_dir", cfg.output_dir] + if cfg.not_used_dataset_yaml_filename != "": + args += ["--not_used_yaml_file", cfg.not_used_dataset_yaml_filename] + if cfg.gzip_output: + args.append("--gzip_output") + + command = [commands_parser.Command(STAGE="corrected reads compression", + path=sys.executable, + args=args, + short_name=self.short_name)] + return command + + +class ErrorCorrectionStage(stage.Stage): + STAGE_NAME = "Read error correction" + stages = [] + + def __init__(self, cfg, *args): + super(ErrorCorrectionStage, self).__init__(*args) + + self.cfg = merge_configs(cfg["error_correction"], cfg["common"]) + self.output_files["corrected_dataset_yaml_filename"] = os.path.join(self.cfg.output_dir, "corrected.yaml") + + self.cfg.not_used_dataset_yaml_filename = "" + self.stages.append(ECRunningToolStage("ec_runtool", + self.output_files, self.tmp_configs_dir, self.dataset_data, + self.log, self.bin_home, self.ext_python_modules_home, + self.python_modules_home)) + self.stages.append( + ErrorCorrectionCompressingStage("ec_compress", + self.output_files, self.tmp_configs_dir, self.dataset_data, self.log, + self.bin_home, self.ext_python_modules_home, + self.python_modules_home)) + + + def generate_config(self, cfg): + if sys.version.startswith("2."): + import pyyaml2 as pyyaml + elif sys.version.startswith("3."): + import pyyaml3 as pyyaml + + self.cfg = merge_configs(cfg["error_correction"], cfg["common"]) + self.output_files["corrected_dataset_yaml_filename"] = os.path.join(self.cfg.output_dir, "corrected.yaml") + self.cfg.__dict__["dataset_yaml_filename"] = cfg["dataset"].yaml_filename + + addsitedir(self.ext_python_modules_home) + + if not os.path.isdir(self.cfg.output_dir): + 
os.makedirs(self.cfg.output_dir) + + # not all reads need processing + if support.get_lib_ids_by_type(self.dataset_data, options_storage.LONG_READS_TYPES): + not_used_dataset_data = support.get_libs_by_type(self.dataset_data, options_storage.LONG_READS_TYPES) + to_correct_dataset_data = support.rm_libs_by_type(self.dataset_data, options_storage.LONG_READS_TYPES) + to_correct_dataset_yaml_filename = os.path.join(self.cfg.output_dir, "to_correct.yaml") + self.cfg.not_used_dataset_yaml_filename = os.path.join(self.cfg.output_dir, "dont_correct.yaml") + with open(to_correct_dataset_yaml_filename, 'w') as f: + pyyaml.dump(to_correct_dataset_data, f, + default_flow_style=False, default_style='"', width=float("inf")) + + with open(self.cfg.not_used_dataset_yaml_filename, 'w') as f: + pyyaml.dump(not_used_dataset_data, f, + default_flow_style=False, default_style='"', width=float("inf")) + self.cfg.dataset_yaml_filename = to_correct_dataset_yaml_filename + else: + self.cfg.not_used_dataset_yaml_filename = "" + + for stage in self.stages: + stage.generate_config(self.cfg) + + def get_command(self, cfg): + return [commands_parser.Command(STAGE=self.STAGE_NAME, + path="true", + args=[], + short_name=self.short_name + "_start")] + \ + [x for stage in self.stages for x in stage.get_command(self.cfg)] + \ + [commands_parser.Command(STAGE=self.STAGE_NAME, + path="true", + args=[], + short_name=self.short_name + "_finish")] + + +def add_to_pipeline(pipeline, cfg, output_files, tmp_configs_dir, dataset_data, log, + bin_home, ext_python_modules_home, python_modules_home): + if "error_correction" in cfg: + pipeline.add(ErrorCorrectionStage(cfg, "ec", output_files, tmp_configs_dir, + dataset_data, log, bin_home, ext_python_modules_home, python_modules_home)) diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/pipeline.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/pipeline.py new file mode 100644 index 0000000..6f740f0 --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/pipeline.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python + +############################################################################ +# Copyright (c) 2015-2019 Saint Petersburg State University +# Copyright (c) 2011-2014 Saint Petersburg Academic University +# All Rights Reserved +# See file LICENSE for details. 
+############################################################################ + +import os +import shutil +from distutils import dir_util + +import options_storage + +class Pipeline(object): + stages = [] + + # copying configs before all computations (to prevent its changing at run time) + def copy_configs(self, cfg, spades_home, tmp_configs_dir): + if os.path.isdir(tmp_configs_dir): + shutil.rmtree(tmp_configs_dir) + if not os.path.isdir(tmp_configs_dir): + if options_storage.args.configs_dir: + dir_util.copy_tree(options_storage.args.configs_dir, tmp_configs_dir, preserve_times=False, + preserve_mode=False) + else: + dir_util.copy_tree(os.path.join(spades_home, "configs"), tmp_configs_dir, preserve_times=False, + preserve_mode=False) + + def add(self, stage): + self.stages.append(stage) + + def get_commands(self, cfg): + commands = [] + for stage in self.stages: + commands += stage.get_command(cfg) + return commands + + def generate_configs(self, cfg, spades_home, tmp_configs_dir): + self.copy_configs(cfg, spades_home, tmp_configs_dir) + for stage in self.stages: + stage.generate_config(cfg) diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/postprocessing_stage.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/postprocessing_stage.py new file mode 100644 index 0000000..1fc5447 --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/postprocessing_stage.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python + +############################################################################ +# Copyright (c) 2015-2019 Saint Petersburg State University +# Copyright (c) 2011-2014 Saint Petersburg Academic University +# All Rights Reserved +# See file LICENSE for details. +############################################################################ + +import os +import shutil +import sys + +import options_storage +from stages import stage +import commands_parser + + +class PostprocessingStage(stage.Stage): + STAGE_NAME = "Postprocessing" + + def get_command(self, cfg): + # args: result_scaffolds_filename, assembled_scaffolds_filename, bin_home, ext_python_modules_home, output_dir, truseq_long_reads_file_base, dataset_yaml_file + args = [os.path.join(self.python_modules_home, "spades_pipeline", "scripts", "postprocessing_script.py"), + "--result_scaffolds_filename", self.output_files["result_scaffolds_filename"], + "--assembled_scaffolds_filename", self.output_files["assembled_scaffolds_filename"], + "--bin_home", self.bin_home, + "--ext_python_modules_home", self.ext_python_modules_home, + "--output_dir", cfg["common"].output_dir, + "--truseq_long_reads_file_base", self.output_files["truseq_long_reads_file_base"], + "--dataset_yaml_file", options_storage.args.dataset_yaml_filename, + "--threads", str(options_storage.args.threads)] + + return [commands_parser.Command(STAGE=self.STAGE_NAME, + path=sys.executable, + args=args, + short_name=self.short_name)] + + +def add_to_pipeline(pipeline, cfg, output_files, tmp_configs_dir, dataset_data, log, + bin_home, ext_python_modules_home, python_modules_home): + if "assembly" in cfg and cfg["run_truseq_postprocessing"]: + pipeline.add( + PostprocessingStage("tpp", output_files, tmp_configs_dir, dataset_data, log, + bin_home, ext_python_modules_home, python_modules_home)) diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/preprocess_reads_stage.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/preprocess_reads_stage.py new file mode 100644 index 0000000..57acb15 --- 
/dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/preprocess_reads_stage.py @@ -0,0 +1,302 @@ +#!/usr/bin/env python + +############################################################################ +# Copyright (c) 2015-2019 Saint Petersburg State University +# Copyright (c) 2011-2014 Saint Petersburg Academic University +# All Rights Reserved +# See file LICENSE for details. +############################################################################ + +import os +import sys +import gzip + +import support +import options_storage +import commands_parser +from stages import stage + + +class PreprocessInterlacedReads(stage.Stage): + STAGE_NAME = "Preprocess interlaced reads" + + # {input_filename, out_left_filename, out_right_filename, was_compressed, is_fastq}} + update_list = [] + + def split_interlaced_reads(self, dataset_data, dst, log): + self.dst = dst + for reads_library in dataset_data: + copy_reads_library = dict(reads_library) + for key, value in copy_reads_library.items(): + if key == "interlaced reads": + if "left reads" not in reads_library: + reads_library["left reads"] = [] + reads_library["right reads"] = [] + for interlaced_reads in value: + if interlaced_reads in options_storage.dict_of_prefixes: + ext = options_storage.dict_of_prefixes[interlaced_reads] + else: + ext = os.path.splitext(interlaced_reads)[1] + was_compressed = False + if ext.endswith(".gz"): + was_compressed = True + ungzipped = os.path.splitext(interlaced_reads)[0] + out_basename, ext = os.path.splitext(os.path.basename(ungzipped)) + else: + out_basename, ext = os.path.splitext(os.path.basename(interlaced_reads)) + + if interlaced_reads in options_storage.dict_of_prefixes: + ext = options_storage.dict_of_prefixes[interlaced_reads] + + if ext.lower().startswith(".fq") or ext.lower().startswith(".fastq"): + is_fastq = True + ext = ".fastq" + else: + is_fastq = False + ext = ".fasta" + + out_left_filename = os.path.join(dst, "%s_1%s" % (out_basename, ext)) + out_right_filename = os.path.join(dst, "%s_2%s" % (out_basename, ext)) + + self.update_list.append({"input_filename": interlaced_reads, + "out_left_filename": out_left_filename, + "out_right_filename": out_right_filename, + "was_compressed": was_compressed, + "is_fastq": is_fastq}) + + reads_library["left reads"].append(out_left_filename) + reads_library["right reads"].append(out_right_filename) + + if interlaced_reads in options_storage.dict_of_prefixes: + del options_storage.dict_of_prefixes[interlaced_reads] + del reads_library["interlaced reads"] + + def generate_config(self, cfg): + self.split_interlaced_reads(self.dataset_data, self.dst, self.log) + + with open(os.path.join(self.tmp_dir, "interlaced"), "w") as fw: + for update_item in self.update_list: + fw.write(update_item["input_filename"] + "\n") + fw.write(update_item["out_left_filename"] + "\n") + fw.write(update_item["out_right_filename"] + "\n") + fw.write(str(update_item["was_compressed"]) + "\n") + fw.write(str(update_item["is_fastq"]) + "\n") + + def get_command(self, cfg): + command = [commands_parser.Command(STAGE=self.STAGE_NAME, + path=sys.executable, + args=[ + os.path.join(self.python_modules_home, "spades_pipeline", "scripts", + "preprocess_interlaced_reads.py"), + "--args_filename", os.path.join(self.tmp_dir, "interlaced"), + "--dst", self.dst], + short_name=self.short_name)] + return command + + def __init__(self, dir_for_split_reads, tmp_dir, *args): + super(PreprocessInterlacedReads, self).__init__(*args) + self.dst = dir_for_split_reads + self.tmp_dir 
= tmp_dir + + +class PreprocessNxmateReads(stage.Stage): + STAGE_NAME = "Preprocess nxmate reads" + + # {infile1, infile2} + update_list = [] + + def get_new_names(self, infilename1, infilename2): + basename1 = os.path.basename(infilename1) + if os.path.splitext(basename1)[1] == ".gz": + basename1 = os.path.splitext(basename1)[0] + basename2 = os.path.basename(infilename2) + if os.path.splitext(basename2)[1] == ".gz": + basename2 = os.path.splitext(basename2)[0] + # open three outfiles + splitfilenameleft = os.path.join(self.dst, "R1_IJS7_mates_ICC4_" + basename1) + + splitfilenameright = os.path.join(self.dst, "R2_IJS7_mates_ICC4_" + basename2) + + unsplitfilename = os.path.join(self.dst, "unsplit_IJS7_mates_ICC4_" + basename1.replace("_R1_", "_R1R2_")) + return splitfilenameleft, splitfilenameright, unsplitfilename + + def process_nxmate_reads(self, dataset_data, dst, log): + self.dst = dst + for reads_library in dataset_data: + if reads_library["type"] == "nxmate": + raw_left_reads = reads_library["left reads"] + raw_right_reads = reads_library["right reads"] + reads_library["left reads"] = [] + reads_library["right reads"] = [] + reads_library["single reads"] = [] + for id, left_reads_fpath in enumerate(raw_left_reads): + right_reads_fpath = raw_right_reads[id] + processed_left_reads_fpath, processed_right_reads_fpath, single_reads_fpath = \ + self.get_new_names(left_reads_fpath, right_reads_fpath) + reads_library["left reads"].append(processed_left_reads_fpath) + reads_library["right reads"].append(processed_right_reads_fpath) + reads_library["single reads"].append(single_reads_fpath) + self.update_list.append({"infile1": left_reads_fpath, + "infile2": right_reads_fpath}) + reads_library["type"] = "mate-pairs" + reads_library["orientation"] = "fr" + + def generate_config(self, cfg): + self.process_nxmate_reads(self.dataset_data, self.dst, self.log) + with open(os.path.join(self.tmp_dir, "nxmate"), "w") as fw: + for update_item in self.update_list: + fw.write(update_item["infile1"] + "\n") + fw.write(update_item["infile2"] + "\n") + + def get_command(self, cfg): + command = [commands_parser.Command(STAGE=self.STAGE_NAME, + path=sys.executable, + args=[ + os.path.join(self.python_modules_home, "spades_pipeline", "scripts", + "preprocess_nxmate_reads.py"), + "--args_filename", os.path.join(self.tmp_dir, "nxmate"), + "--dst", self.dst, + "--threads", str(options_storage.args.threads)], + short_name=self.short_name)] + return command + + def __init__(self, dir_for_split_reads, tmp_dir, *args): + super(PreprocessNxmateReads, self).__init__(*args) + self.tmp_dir = tmp_dir + self.dst = dir_for_split_reads + + +class PreprocessContigs(stage.Stage): + STAGE_NAME = "Preprocess additional contigs" + + # (gzipped, old_filename, new_filename) + update_list = [] + + def process_Ns_in_additional_contigs(self, dataset_data, dst, log): + self.dst = dst + for reads_library in dataset_data: + if reads_library["type"].endswith("contigs"): + new_entry = [] + for contigs in reads_library["single reads"]: + if contigs in options_storage.dict_of_prefixes: + ext = options_storage.dict_of_prefixes[contigs] + basename = contigs + else: + basename, ext = os.path.splitext(contigs) + + gzipped = False + if ext.endswith(".gz"): + gzipped = True + if contigs not in options_storage.dict_of_prefixes: + basename, _ = os.path.splitext(basename) + new_filename = os.path.join(dst, os.path.basename(basename) + ".fasta") + if contigs in options_storage.dict_of_prefixes: + del options_storage.dict_of_prefixes[contigs] + 
new_entry.append(new_filename) + self.update_list.append({"gzipped": gzipped, "old_filename": contigs, "new_filename": new_filename}) + reads_library["single reads"] = new_entry + + def generate_config(self, cfg): + self.process_Ns_in_additional_contigs(self.dataset_data, self.dst, self.log) + with open(os.path.join(self.tmp_dir, "contigs"), "w") as fw: + for update_item in self.update_list: + fw.write(str(update_item["gzipped"]) + "\n") + fw.write(update_item["old_filename"] + "\n") + fw.write(update_item["new_filename"] + "\n") + + def get_command(self, cfg): + command = [commands_parser.Command(STAGE=self.STAGE_NAME, + path=sys.executable, + args=[ + os.path.join(self.python_modules_home, "spades_pipeline", "scripts", + "preprocess_contigs.py"), + "--args_filename", os.path.join(self.tmp_dir, "contigs"), + "--dst", self.dst, + "--threshold_for_breaking_additional_contigs", + str(options_storage.THRESHOLD_FOR_BREAKING_ADDITIONAL_CONTIGS)], + short_name=self.short_name)] + return command + + def __init__(self, dir_for_split_reads, tmp_dir, *args): + super(PreprocessContigs, self).__init__(*args) + self.tmp_dir = tmp_dir + self.dst = dir_for_split_reads + + +# splitting interlaced reads and processing Ns in additional contigs if needed +class PreprocessReadsStage(stage.Stage): + STAGE_NAME = "Preprocess reads" + stages = [] + + def __init__(self, cfg, *args): + super(PreprocessReadsStage, self).__init__(*args) + + self.dir_for_split_reads = os.path.join(options_storage.args.output_dir, "split_input") + self.tmp_dir = os.path.join(self.dir_for_split_reads, "tmp") + + if support.dataset_has_interlaced_reads(self.dataset_data): + self.stages.append(PreprocessInterlacedReads(self.dir_for_split_reads, self.tmp_dir, "preprocess_12", + self.output_files, self.tmp_configs_dir, + self.dataset_data, self.log, + self.bin_home, + self.ext_python_modules_home, + self.python_modules_home)) + + if support.dataset_has_nxmate_reads(self.dataset_data): + self.stages.append(PreprocessNxmateReads(self.dir_for_split_reads, self.tmp_dir, "preporocess_nxmate", + self.output_files, self.tmp_configs_dir, + self.dataset_data, self.log, + self.bin_home, + self.ext_python_modules_home, + self.python_modules_home)) + + if support.dataset_has_additional_contigs(self.dataset_data): + self.stages.append(PreprocessContigs(self.dir_for_split_reads, self.tmp_dir, "preprocess_ac", + self.output_files, self.tmp_configs_dir, + self.dataset_data, self.log, + self.bin_home, + self.ext_python_modules_home, + self.python_modules_home)) + + options_storage.args.dataset_yaml_filename = os.path.join(options_storage.args.output_dir, + "input_dataset.yaml") + + cfg["dataset"].yaml_filename = options_storage.args.dataset_yaml_filename + + def generate_config(self, cfg): + if not os.path.isdir(self.dir_for_split_reads): + os.makedirs(self.dir_for_split_reads) + if not os.path.isdir(self.tmp_dir): + os.makedirs(self.tmp_dir) + + for stage in self.stages: + stage.generate_config(cfg) + + if sys.version.startswith("2."): + import pyyaml2 as pyyaml + elif sys.version.startswith("3."): + import pyyaml3 as pyyaml + + with open(options_storage.args.dataset_yaml_filename, 'w') as f: + pyyaml.dump(self.dataset_data, f, + default_flow_style=False, default_style='"', width=float("inf")) + + def get_command(self, cfg): + return [commands_parser.Command(STAGE=self.STAGE_NAME, + path="true", + args=[], + short_name=self.short_name + "_start")] + \ + [x for stage in self.stages for x in stage.get_command(cfg)] + \ + 
[commands_parser.Command(STAGE=self.STAGE_NAME,
+                                        path="true",
+                                        args=[],
+                                        short_name=self.short_name + "_finish",
+                                        del_after=[self.tmp_dir])]
+
+
+def add_to_pipeline(pipeline, cfg, output_files, tmp_configs_dir, dataset_data, log,
+                    bin_home, ext_python_modules_home, python_modules_home):
+    if support.dataset_has_interlaced_reads(dataset_data) or support.dataset_has_additional_contigs(dataset_data) \
+            or support.dataset_has_nxmate_reads(dataset_data):
+        pipeline.add(PreprocessReadsStage(cfg, "preprocess", output_files, tmp_configs_dir,
+                                          dataset_data, log, bin_home, ext_python_modules_home, python_modules_home))
diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/scaffold_correction_stage.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/scaffold_correction_stage.py
new file mode 100644
index 0000000..4dfac24
--- /dev/null
+++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/scaffold_correction_stage.py
@@ -0,0 +1,112 @@
+#!/usr/bin/env python
+
+############################################################################
+# Copyright (c) 2015-2019 Saint Petersburg State University
+# Copyright (c) 2011-2014 Saint Petersburg Academic University
+# All Rights Reserved
+# See file LICENSE for details.
+############################################################################
+
+import os
+import shutil
+from distutils import dir_util
+
+import commands_parser
+import options_storage
+from stages import stage
+import process_cfg
+from process_cfg import bool_to_str
+
+# TODO: copy-paste from the iteration stage
+READS_TYPES_USED_IN_CONSTRUCTION = ["paired-end", "single", "hq-mate-pairs"]
+READS_TYPES_USED_IN_RNA_SEQ = ["paired-end", "single", "trusted-contigs", "untrusted-contigs"]
+
+
+def prepare_config_scaffold_correction(filename, cfg, log, saves_dir, K):
+    subst_dict = dict()
+
+    subst_dict["K"] = str(K)
+    subst_dict["dataset"] = process_cfg.process_spaces(cfg.dataset)
+    subst_dict["output_base"] = process_cfg.process_spaces(os.path.join(cfg.output_dir, "SCC"))
+    subst_dict["tmp_dir"] = process_cfg.process_spaces(cfg.tmp_dir)
+    subst_dict["use_additional_contigs"] = bool_to_str(False)
+    subst_dict["main_iteration"] = bool_to_str(False)
+    subst_dict["entry_point"] = options_storage.BASE_STAGE
+    subst_dict["load_from"] = saves_dir
+    subst_dict["developer_mode"] = bool_to_str(cfg.developer_mode)
+    subst_dict["max_threads"] = cfg.max_threads
+    subst_dict["max_memory"] = cfg.max_memory
+
+    # TODO
+    process_cfg.substitute_params(filename, subst_dict, log)
+
+
+def add_configs(command, configs_dir):
+    # Order matters here!
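+    # (Presumably the spades binary applies these .info files in sequence,
+    # with later files overriding values from earlier ones, which is why the
+    # relative order of the mapping below must be preserved.)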
+ mode_config_mapping = [("single_cell", "mda_mode"), + ("meta", "meta_mode"), + ("truseq_mode", "moleculo_mode"), + ("rna", "rna_mode"), + ("large_genome", "large_genome_mode"), + ("plasmid", "plasmid_mode")] + # ("careful", "careful_mode"), + for (mode, config) in mode_config_mapping: + if options_storage.args.__dict__[mode]: + if mode == "rna" or mode == "meta": + command.append(os.path.join(configs_dir, "mda_mode.info")) + command.append(os.path.join(configs_dir, config + ".info")) + if options_storage.args.__dict__["careful"]: + if options_storage.args.__dict__["single_cell"]: + command.append(os.path.join(configs_dir, "careful_mda_mode.info")) + else: + command.append(os.path.join(configs_dir, "careful_mode.info")) + + # special case: extra config + if options_storage.args.rna and options_storage.args.fast: + command.append(os.path.join(configs_dir, "rna_fast_mode.info")) + + +class ScaffoldCorrectionStage(stage.Stage): + def __init__(self, latest, *args): + super(ScaffoldCorrectionStage, self).__init__(*args) + self.latest = latest + + def generate_config(self, cfg): + K = cfg.iterative_K[-1] + latest = os.path.join(cfg.output_dir, "K%d" % K) + K = options_storage.SCC_K + data_dir = os.path.join(cfg.output_dir, "SCC", "K%d" % K) + saves_dir = os.path.join(data_dir, "saves") + dst_configs = os.path.join(data_dir, "configs") + cfg_file_name = os.path.join(dst_configs, "config.info") + + if os.path.isdir(data_dir): + shutil.rmtree(data_dir) + os.makedirs(data_dir) + + dir_util.copy_tree(os.path.join(self.tmp_configs_dir, "debruijn"), dst_configs, preserve_times=False) + + scaffolds_file = os.path.join(latest, "scaffolds.fasta") + if "read_buffer_size" in cfg.__dict__: + construction_cfg_file_name = os.path.join(dst_configs, "construction.info") + process_cfg.substitute_params(construction_cfg_file_name, {"read_buffer_size": cfg.read_buffer_size}, + self.log) + process_cfg.substitute_params(os.path.join(dst_configs, "moleculo_mode.info"), + {"scaffolds_file": scaffolds_file}, self.log) + prepare_config_scaffold_correction(cfg_file_name, cfg, self.log, saves_dir, K) + + def get_command(self, cfg): + K = options_storage.SCC_K + data_dir = os.path.join(cfg.output_dir, "SCC", "K%d" % K) + dst_configs = os.path.join(data_dir, "configs") + cfg_file_name = os.path.join(dst_configs, "config.info") + + args = [cfg_file_name] + add_configs(args, dst_configs) + command = [commands_parser.Command( + STAGE="SCC", + path=os.path.join(self.bin_home, "spades-truseq-scfcorrection"), + args=args, + config_dir=os.path.relpath(data_dir, options_storage.args.output_dir), + short_name=self.short_name)] + return command diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/spades_iteration_stage.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/spades_iteration_stage.py new file mode 100644 index 0000000..e68c28d --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/spades_iteration_stage.py @@ -0,0 +1,171 @@ +#!/usr/bin/env python + +############################################################################ +# Copyright (c) 2015-2019 Saint Petersburg State University +# Copyright (c) 2011-2014 Saint Petersburg Academic University +# All Rights Reserved +# See file LICENSE for details. 
+############################################################################ + +import os +import shutil +from distutils import dir_util + +import commands_parser +import options_storage +from stages import stage +import process_cfg +from process_cfg import bool_to_str + + +# FIXME double with scaffold correction stage +def add_configs(command, configs_dir): + # Order matters here! + mode_config_mapping = [("isolate", "isolate_mode"), + ("single_cell", "mda_mode"), + ("meta", "meta_mode"), + ("truseq_mode", "moleculo_mode"), + ("rna", "rna_mode"), + ("large_genome", "large_genome_mode"), + ("plasmid", "plasmid_mode"), + ("bio", "bgc_mode")] + # ("careful", "careful_mode"), + for (mode, config) in mode_config_mapping: + if options_storage.args.__dict__[mode]: + if mode == "rna" or mode == "meta": + command.append(os.path.join(configs_dir, "mda_mode.info")) + command.append(os.path.join(configs_dir, config + ".info")) + if options_storage.args.__dict__["careful"]: + if options_storage.args.__dict__["single_cell"]: + command.append(os.path.join(configs_dir, "careful_mda_mode.info")) + else: + command.append(os.path.join(configs_dir, "careful_mode.info")) + + +def prepare_config_spades(filename, cfg, log, additional_contigs_fname, K, stage, saves_dir, last_one, execution_home): + subst_dict = dict() + subst_dict["K"] = str(K) + subst_dict["dataset"] = process_cfg.process_spaces(cfg.dataset) + subst_dict["output_base"] = process_cfg.process_spaces(cfg.output_dir) + subst_dict["tmp_dir"] = process_cfg.process_spaces(cfg.tmp_dir) + if additional_contigs_fname: + subst_dict["additional_contigs"] = process_cfg.process_spaces(additional_contigs_fname) + subst_dict["use_additional_contigs"] = bool_to_str(True) + else: + subst_dict["use_additional_contigs"] = bool_to_str(False) + subst_dict["main_iteration"] = bool_to_str(last_one) + subst_dict["entry_point"] = stage + subst_dict["load_from"] = saves_dir + if "checkpoints" in cfg.__dict__: + subst_dict["checkpoints"] = cfg.checkpoints + subst_dict["developer_mode"] = bool_to_str(cfg.developer_mode) + subst_dict["gap_closer_enable"] = bool_to_str(last_one or K >= options_storage.GAP_CLOSER_ENABLE_MIN_K) + subst_dict["rr_enable"] = bool_to_str(last_one and cfg.rr_enable) +# subst_dict["topology_simplif_enabled"] = bool_to_str(last_one) + subst_dict["max_threads"] = cfg.max_threads + subst_dict["max_memory"] = cfg.max_memory + subst_dict["save_gp"] = bool_to_str(cfg.save_gp) + if not last_one: + subst_dict["correct_mismatches"] = bool_to_str(False) + if "resolving_mode" in cfg.__dict__: + subst_dict["resolving_mode"] = cfg.resolving_mode + if "pacbio_mode" in cfg.__dict__: + subst_dict["pacbio_test_on"] = bool_to_str(cfg.pacbio_mode) + subst_dict["pacbio_reads"] = process_cfg.process_spaces(cfg.pacbio_reads) + if cfg.cov_cutoff == "off": + subst_dict["use_coverage_threshold"] = bool_to_str(False) + else: + subst_dict["use_coverage_threshold"] = bool_to_str(True) + if cfg.cov_cutoff == "auto": + subst_dict["coverage_threshold"] = 0.0 + else: + subst_dict["coverage_threshold"] = cfg.cov_cutoff + if cfg.lcer_cutoff is not None: + subst_dict["lcer_enabled"] = bool_to_str(True) + subst_dict["lcer_coverage_threshold"] = cfg.lcer_cutoff + + if "series_analysis" in cfg.__dict__: + subst_dict["series_analysis"] = cfg.series_analysis + process_cfg.substitute_params(filename, subst_dict, log) + + +def prepare_config_rnaspades(filename, log): + if not options_storage.args.rna: + return + subst_dict = dict() + subst_dict["ss_enabled"] = 
bool_to_str(options_storage.args.strand_specificity is not None)
+    subst_dict["antisense"] = bool_to_str(options_storage.args.strand_specificity == "rf")
+    process_cfg.substitute_params(filename, subst_dict, log)
+
+
+def prepare_config_bgcspades(filename, cfg, log):
+    if not options_storage.args.bio:
+        return
+    subst_dict = dict()
+    subst_dict["set_of_hmms"] = cfg.set_of_hmms
+    process_cfg.substitute_params(filename, subst_dict, log)
+
+
+def prepare_config_construction(filename, log):
+    if options_storage.args.read_cov_threshold is None:
+        return
+    subst_dict = dict()
+    subst_dict["read_cov_threshold"] = options_storage.args.read_cov_threshold
+    process_cfg.substitute_params(filename, subst_dict, log)
+
+
+class IterationStage(stage.Stage):
+    def __init__(self, K, prev_K, last_one, get_stage, latest, *args):
+        super(IterationStage, self).__init__(*args)
+        self.K = K
+        self.short_name = "k%d" % self.K
+        self.prev_K = prev_K
+        self.last_one = last_one
+        self.get_stage = get_stage
+        self.latest = latest
+
+    def generate_config(self, cfg):
+        data_dir = os.path.join(cfg.output_dir, "K%d" % self.K)
+        saves_dir = os.path.join(data_dir, "saves")
+        dst_configs = os.path.join(data_dir, "configs")
+
+        if self.get_stage(self.short_name) == options_storage.BASE_STAGE:
+            if not os.path.isdir(data_dir):
+                os.makedirs(data_dir)
+
+            dir_util._path_created = {}  # see http://stackoverflow.com/questions/9160227/dir-util-copy-tree-fails-after-shutil-rmtree
+            dir_util.copy_tree(os.path.join(self.tmp_configs_dir, "debruijn"), dst_configs, preserve_times=False)
+
+            if self.prev_K:
+                additional_contigs_dname = os.path.join(cfg.output_dir, "K%d" % self.prev_K, "simplified_contigs")
+            else:
+                additional_contigs_dname = None
+
+            if "read_buffer_size" in cfg.__dict__:
+                # FIXME: why is this substitution done here?
+                process_cfg.substitute_params(os.path.join(dst_configs, "construction.info"),
+                                              {"read_buffer_size": cfg.read_buffer_size}, self.log)
+            if "scaffolding_mode" in cfg.__dict__:
+                # FIXME: why is this substitution done here?
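+                # As with read_buffer_size above, the value is substituted into
+                # this K iteration's local copy of pe_params.info.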
+ process_cfg.substitute_params(os.path.join(dst_configs, "pe_params.info"), + {"scaffolding_mode": cfg.scaffolding_mode}, self.log) + + prepare_config_rnaspades(os.path.join(dst_configs, "rna_mode.info"), self.log) + prepare_config_bgcspades(os.path.join(dst_configs, "bgc_mode.info"), cfg, self.log) + prepare_config_construction(os.path.join(dst_configs, "construction.info"), self.log) + cfg_fn = os.path.join(dst_configs, "config.info") + prepare_config_spades(cfg_fn, cfg, self.log, additional_contigs_dname, self.K, self.get_stage(self.short_name), + saves_dir, self.last_one, self.bin_home) + + def get_command(self, cfg): + data_dir = os.path.join(cfg.output_dir, "K%d" % self.K) + dst_configs = os.path.join(data_dir, "configs") + cfg_fn = os.path.join(dst_configs, "config.info") + args = [cfg_fn] + add_configs(args, dst_configs) + + command = [commands_parser.Command( + STAGE="K%d" % self.K, + path=os.path.join(self.bin_home, "spades-core"), + args=args, + config_dir=os.path.relpath(data_dir, options_storage.args.output_dir), + short_name=self.short_name)] + return command diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/spades_stage.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/spades_stage.py new file mode 100644 index 0000000..68e9737 --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/spades_stage.py @@ -0,0 +1,378 @@ +#!/usr/bin/env python + +############################################################################ +# Copyright (c) 2015-2019 Saint Petersburg State University +# Copyright (c) 2011-2014 Saint Petersburg Academic University +# All Rights Reserved +# See file LICENSE for details. +############################################################################ + +import os +import shutil +import sys +from site import addsitedir + +import commands_parser +import options_storage +from stages import stage +from stages import scaffold_correction_stage +from stages import spades_iteration_stage +import support +from process_cfg import merge_configs + + +def get_read_length(output_dir, K, ext_python_modules_home, log): + est_params_filename = os.path.join(output_dir, "K%d" % K, "final.lib_data") + max_read_length = 0 + if os.path.isfile(est_params_filename): + addsitedir(ext_python_modules_home) + if sys.version.startswith("2."): + import pyyaml2 as pyyaml + elif sys.version.startswith("3."): + import pyyaml3 as pyyaml + est_params_data = pyyaml.load(open(est_params_filename)) + max_read_length = int(est_params_data["nomerge max read length"]) + log.info("Max read length detected as %d" % max_read_length) + if max_read_length == 0: + support.error("Failed to estimate maximum read length! 
File with estimated params: %s" % est_params_filename, log)
+    return max_read_length
+
+
+def update_k_mers_in_special_cases(cur_k_mers, RL, log, silent=False):
+    if options_storage.auto_K_allowed():
+        if RL >= 250:
+            if not silent:
+                log.info("Default k-mer sizes were set to %s because estimated "
+                         "read length (%d) is equal to or greater than 250" % (str(options_storage.K_MERS_250), RL))
+            return options_storage.K_MERS_250
+        if RL >= 150:
+            if not silent:
+                log.info("Default k-mer sizes were set to %s because estimated "
+                         "read length (%d) is equal to or greater than 150" % (str(options_storage.K_MERS_150), RL))
+            return options_storage.K_MERS_150
+    if RL <= max(cur_k_mers):
+        new_k_mers = [k for k in cur_k_mers if k < RL]
+        if not silent:
+            log.info("K-mer sizes were set to %s because estimated "
+                     "read length (%d) is less than %d" % (str(new_k_mers), RL, max(cur_k_mers)))
+        return new_k_mers
+    return cur_k_mers
+
+
+def reveal_original_k_mers(RL):
+    if options_storage.original_k_mers is None or options_storage.original_k_mers == "auto":
+        cur_k_mers = options_storage.args.k_mers
+        options_storage.args.k_mers = options_storage.original_k_mers
+        original_k_mers = update_k_mers_in_special_cases(options_storage.K_MERS_SHORT, RL, None, silent=True)
+        options_storage.args.k_mers = cur_k_mers
+    else:
+        original_k_mers = options_storage.original_k_mers
+    original_k_mers = [k for k in original_k_mers if k < RL]
+    return original_k_mers
+
+
+def rna_k_values(support, dataset_data, log):
+    rna_rl = support.get_reads_length(dataset_data, log, ["merged reads"])
+    upper_k = int(rna_rl / 2) - 1
+    if upper_k % 2 == 0:
+        upper_k -= 1
+
+    lower_k = min(max(int(rna_rl / 3), options_storage.RNA_MIN_K), options_storage.RNA_MAX_LOWER_K)
+    if lower_k % 2 == 0:
+        lower_k -= 1
+
+    use_iterative = True
+    if upper_k <= lower_k:
+        use_iterative = False
+
+    if upper_k < options_storage.RNA_MIN_K:
+        support.warning("\nAuto K value (%d) is too small; it is recommended to be at least %d.\n" % (upper_k, options_storage.RNA_MIN_K))
+        if rna_rl <= options_storage.RNA_MIN_K:
+            support.warning(
+                "read length is too small (%d), but keeping the current K value anyway. Consider setting K manually.\n" % (
+                    rna_rl))
+        else:
+            upper_k = options_storage.RNA_MIN_K
+            log.info("Upper K value is set to %d.\n" % (upper_k))
+
+    if upper_k > options_storage.MAX_K:
+        log.info("\nAuto K value (%d) is too large, all K values should not exceed %d. 
Setting k=%d.\n" + % (upper_k, options_storage.MAX_K, options_storage.MAX_K)) + upper_k = options_storage.MAX_K + + if not use_iterative: + return [upper_k] + return [lower_k, upper_k] + + +def generateK_for_rna(cfg, dataset_data, log): + if cfg.iterative_K == "auto": + k_values = options_storage.K_MERS_RNA + if not options_storage.args.iontorrent: + k_values = rna_k_values(support, dataset_data, log) + cfg.iterative_K = k_values + log.info("K values to be used: " + str(k_values)) + + +def generateK(cfg, log, dataset_data, silent=False): + if options_storage.args.rna: + generateK_for_rna(cfg, dataset_data, log) + elif not options_storage.args.iontorrent: + RL = support.get_primary_max_reads_length(dataset_data, log, ["merged reads"], + options_storage.READS_TYPES_USED_IN_CONSTRUCTION) + if options_storage.auto_K_allowed(): + if options_storage.args.plasmid: + if RL >= 150: + if not silent: + log.info("Default k-mer sizes were set to %s because estimated read length (%d) is equal to or greater than 150" % (str(options_storage.K_MERS_PLASMID_LONG), RL)) + cfg.iterative_K = options_storage.K_MERS_PLASMID_LONG + else: + if not silent: + log.info("Default k-mer sizes were set to %s because estimated read length (%d) is less than 150" % (str(options_storage.K_MERS_PLASMID_100), RL)) + cfg.iterative_K = options_storage.K_MERS_PLASMID_100 + else: + if RL >= 250: + if not silent: + log.info("Default k-mer sizes were set to %s because estimated " + "read length (%d) is equal to or greater than 250" % (str(options_storage.K_MERS_250), RL)) + cfg.iterative_K = options_storage.K_MERS_250 + elif RL >= 150: + if not silent: + log.info("Default k-mer sizes were set to %s because estimated " + "read length (%d) is equal to or greater than 150" % (str(options_storage.K_MERS_150), RL)) + cfg.iterative_K = options_storage.K_MERS_150 + if RL <= max(cfg.iterative_K): + new_k_mers = [k for k in cfg.iterative_K if k < RL] + if not silent: + log.info("K-mer sizes were set to %s because estimated " + "read length (%d) is less than %d" % (str(new_k_mers), RL, max(cfg.iterative_K))) + cfg.iterative_K = new_k_mers + + if not isinstance(cfg.iterative_K, list): + cfg.iterative_K = [cfg.iterative_K] + cfg.iterative_K = sorted(cfg.iterative_K) + +class PlasmidGlueFileStage(stage.Stage): + STAGE_NAME = "metaplasmid glue files" + def __init__(self, latest, *args): + super(PlasmidGlueFileStage, self).__init__(*args) + self.latest = latest + + def get_command(self, cfg): + self.cfg = cfg + args = [os.path.join(self.python_modules_home, "spades_pipeline", "scripts", "plasmid_glue.py")] + args.append(self.latest) + command = [commands_parser.Command(STAGE=self.STAGE_NAME, + path=sys.executable, + args=args, + short_name=self.short_name, + )] + return command + + +class SpadesCopyFileStage(stage.Stage): + STAGE_NAME = "Copy files" + + def always_copy(self, output_file, latest, cfg): + return not cfg.correct_scaffolds + + def rna_copy(self, output_file, latest, cfg): + return options_storage.args.rna and self.always_copy(output_file, latest, cfg) + + def only_bio(self, output_file, latest, cfg): + return options_storage.args.bio + + def correct_scaffolds_copy(self, output_file, latest, cfg): + return cfg.correct_scaffolds + + def not_rna_copy(self, output_file, latest, cfg): + return (not options_storage.args.rna) and self.always_copy(output_file, latest, cfg) + + def rr_enably_copy(self, output_file, latest, cfg): + return (not options_storage.args.rna) and cfg.rr_enable and self.always_copy(output_file, latest, cfg) + + class 
OutputFile(object): + def __init__(self, output_file, tmp_file, need_to_copy): + self.output_file = output_file + self.tmp_file = tmp_file + self.need_to_copy = need_to_copy + + def set_output_files(self): + self.output = [ + self.OutputFile(self.cfg.result_scaffolds, "corrected_scaffolds.fasta", self.correct_scaffolds_copy), + self.OutputFile(os.path.join(os.path.dirname(self.cfg.result_contigs), "before_rr.fasta"), + "before_rr.fasta", self.always_copy), + self.OutputFile(self.cfg.result_transcripts, "transcripts.fasta", self.rna_copy), + self.OutputFile(self.cfg.result_transcripts_paths, "transcripts.paths", self.rna_copy), + self.OutputFile(self.cfg.result_contigs, "final_contigs.fasta", self.not_rna_copy), + self.OutputFile(os.path.join(os.path.dirname(self.cfg.result_contigs), + "first_pe_contigs.fasta"), "first_pe_contigs.fasta", self.not_rna_copy), + self.OutputFile(self.cfg.result_scaffolds, "scaffolds.fasta", self.rr_enably_copy), + self.OutputFile(self.cfg.result_scaffolds_paths, "scaffolds.paths", self.rr_enably_copy), + self.OutputFile(self.cfg.result_graph_gfa, "assembly_graph_with_scaffolds.gfa", self.always_copy), + self.OutputFile(self.cfg.result_graph, "assembly_graph.fastg", self.always_copy), + self.OutputFile(self.cfg.result_contigs_paths, "final_contigs.paths", self.not_rna_copy), + self.OutputFile(self.cfg.result_gene_clusters, "gene_clusters.fasta", self.only_bio), + self.OutputFile(self.cfg.result_bgc_statistics, "bgc_statistics.txt", self.only_bio), + self.OutputFile(self.cfg.result_domain_graph, "domain_graph.dot", self.only_bio) + + ] + + for filtering_type in options_storage.filtering_types: + prefix = filtering_type + "_filtered_" + result_filtered_transcripts = os.path.join(self.cfg.output_dir, + prefix + options_storage.transcripts_name) + self.OutputFile(result_filtered_transcripts, prefix + "final_paths.fasta", self.rna_copy) + + def __init__(self, latest, *args): + super(SpadesCopyFileStage, self).__init__(*args) + self.latest = latest + + def copy_files(self): + latest = self.latest + for outputfile in self.output: + if outputfile.need_to_copy(outputfile, latest, self.cfg): + filename = os.path.join(latest, outputfile.tmp_file) + if os.path.isfile(filename): + shutil.copyfile(filename, outputfile.output_file) + + def get_command(self, cfg): + self.cfg = cfg + self.set_output_files() + args = [os.path.join(self.python_modules_home, "spades_pipeline", "scripts", "copy_files.py")] + for outputfile in self.output: + if outputfile.need_to_copy(outputfile, self.latest, self.cfg): + filename = os.path.join(self.latest, outputfile.tmp_file) + args.append(filename) + args.append(outputfile.output_file) + bin_reads_dir = os.path.join(self.cfg.output_dir, ".bin_reads") + command = [commands_parser.Command(STAGE=self.STAGE_NAME, + path=sys.executable, + args=args, + short_name=self.short_name, + del_after=[bin_reads_dir, self.cfg.tmp_dir])] + return command + + +class SpadesStage(stage.Stage): + stages = [] + STAGE_NAME = "Assembling" + latest = "" + + def __init__(self, cfg, get_stage, *args): + super(SpadesStage, self).__init__(*args) + self.get_stage = get_stage + + self.generate_cfg(cfg, self.output_files) + + # creating dataset + dataset_filename = os.path.join(self.cfg.output_dir, "dataset.info") + with open(dataset_filename, 'w') as dataset_file: + import process_cfg + # TODO don't exists at that moment, what better to write???? 
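+            # Note: the corrected-reads YAML does not exist yet at this point;
+            # the error-correction stage only produces it at run time, so only
+            # its eventual path can be recorded here.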
+ if self.output_files["corrected_dataset_yaml_filename"] != "": + dataset_file.write("reads\t%s\n" % + process_cfg.process_spaces(self.output_files["corrected_dataset_yaml_filename"])) + else: + dataset_file.write( + "reads\t%s\n" % process_cfg.process_spaces(cfg["dataset"].yaml_filename)) + if self.cfg.developer_mode and "reference" in cfg["dataset"].__dict__: + dataset_file.write("reference_genome\t") + dataset_file.write(process_cfg.process_spaces(cfg["dataset"].reference) + '\n') + + if not os.path.isdir(self.output_files["misc_dir"]): + os.makedirs(self.output_files["misc_dir"]) + + generateK(self.cfg, self.log, self.dataset_data) + + self.used_K = [] + count = 0 + prev_K = None + for K in self.cfg.iterative_K: + count += 1 + last_one = count == len(self.cfg.iterative_K) + + iter_stage = spades_iteration_stage.IterationStage(K, prev_K, last_one, self.get_stage, self.latest, + "k%d" % K, + self.output_files, self.tmp_configs_dir, + self.dataset_data, self.log, self.bin_home, + self.ext_python_modules_home, + self.python_modules_home) + self.stages.append(iter_stage) + self.latest = os.path.join(self.cfg.output_dir, "K%d" % K) + + self.used_K.append(K) + prev_K = K + if last_one: + break + + if self.cfg.correct_scaffolds: + self.stages.append(scaffold_correction_stage.ScaffoldCorrectionStage(self.latest, + "scc", + self.output_files, + self.tmp_configs_dir, + self.dataset_data, self.log, + self.bin_home, + self.ext_python_modules_home, + self.python_modules_home)) + self.latest = os.path.join(os.path.join(self.cfg.output_dir, "SCC"), "K21") + if options_storage.args.plasmid and options_storage.args.meta: + self.stages.append(PlasmidGlueFileStage(self.latest, "plasmid_copy_files", + self.output_files, + self.tmp_configs_dir, + self.dataset_data, self.log, + self.bin_home, + self.ext_python_modules_home, + self.python_modules_home)) + self.stages.append(SpadesCopyFileStage(self.latest, "copy_files", + self.output_files, + self.tmp_configs_dir, + self.dataset_data, self.log, + self.bin_home, + self.ext_python_modules_home, + self.python_modules_home)) + + def generate_cfg(self, cfg, output_files): + self.cfg = merge_configs(cfg["assembly"], cfg["common"]) + self.cfg.__dict__["result_contigs"] = output_files["result_contigs_filename"] + self.cfg.__dict__["result_scaffolds"] = output_files["result_scaffolds_filename"] + self.cfg.__dict__["result_graph"] = output_files["result_assembly_graph_filename"] + self.cfg.__dict__["result_graph_gfa"] = output_files["result_assembly_graph_filename_gfa"] + self.cfg.__dict__["result_contigs_paths"] = output_files["result_contigs_paths_filename"] + self.cfg.__dict__["result_scaffolds_paths"] = output_files["result_scaffolds_paths_filename"] + self.cfg.__dict__["result_transcripts"] = output_files["result_transcripts_filename"] + self.cfg.__dict__["result_transcripts_paths"] = output_files["result_transcripts_paths_filename"] + self.cfg.__dict__["result_gene_clusters"] = output_files["result_gene_clusters_filename"] + self.cfg.__dict__["result_bgc_statistics"] = output_files["result_bgc_stats_filename"] + self.cfg.__dict__["result_domain_graph"] = output_files["result_domain_graph_filename"] + + if self.cfg.disable_rr: + self.cfg.__dict__["rr_enable"] = False + else: + self.cfg.__dict__["rr_enable"] = True + + dataset_filename = os.path.join(self.cfg.output_dir, "dataset.info") + self.cfg.__dict__["dataset"] = dataset_filename + self.cfg.tmp_dir = support.get_tmp_dir(prefix="spades_") + + def generate_config(self, cfg): + for stage in self.stages: + 
stage.generate_config(self.cfg) + + def get_command(self, cfg): + return [commands_parser.Command(STAGE=self.STAGE_NAME, + path="true", + args=[], + short_name=self.short_name + "_start")] + \ + [x for stage in self.stages for x in stage.get_command(self.cfg)] + \ + [commands_parser.Command(STAGE=self.STAGE_NAME, + path="true", + args=[], + short_name=self.short_name + "_finish")] + + +def add_to_pipeline(pipeline, get_stage, cfg, output_files, tmp_configs_dir, dataset_data, log, bin_home, + ext_python_modules_home, python_modules_home): + if "assembly" in cfg: + pipeline.add(SpadesStage(cfg, get_stage, "as", output_files, tmp_configs_dir, + dataset_data, log, bin_home, ext_python_modules_home, python_modules_home)) diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/stage.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/stage.py new file mode 100644 index 0000000..d9439c7 --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/stage.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python + +############################################################################ +# Copyright (c) 2019 Saint Petersburg State University +# All Rights Reserved +# See file LICENSE for details. +############################################################################ + +class Stage(object): + def __init__(self, short_name, output_files, tmp_configs_dir, + dataset_data, log, bin_home, ext_python_modules_home, python_modules_home): + self.short_name = short_name + self.output_files = output_files + self.tmp_configs_dir = tmp_configs_dir + self.dataset_data = dataset_data + self.log = log + self.bin_home = bin_home + self.ext_python_modules_home = ext_python_modules_home + self.python_modules_home = python_modules_home + + def generate_config(self, cfg): + pass + + def get_command(self, cfg): + return [] diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/terminating_stage.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/terminating_stage.py new file mode 100644 index 0000000..a8b306b --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/stages/terminating_stage.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python + +############################################################################ +# Copyright (c) 2019 Saint Petersburg State University +# All Rights Reserved +# See file LICENSE for details. 
+############################################################################ + +import os +import sys + +import options_storage +from stages import stage +import commands_parser + + +class TerminatingStage(stage.Stage): + STAGE_NAME = "Terminate" + + def get_command(self, cfg): + del_after = [] + if not cfg["common"].developer_mode: + del_after.append(self.tmp_configs_dir) + + return [commands_parser.Command(STAGE=self.STAGE_NAME, + path="true", + args=[], + short_name=self.short_name, + del_after=del_after)] + + +def add_to_pipeline(pipeline, cfg, output_files, tmp_configs_dir, dataset_data, log, bin_home, + ext_python_modules_home, python_modules_home): + pipeline.add(TerminatingStage("terminate", output_files, tmp_configs_dir, + dataset_data, log, bin_home, + ext_python_modules_home, python_modules_home)) \ No newline at end of file diff --git a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/support.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/support.py similarity index 51% rename from src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/support.py rename to src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/support.py index 7fc8d15..f58e755 100644 --- a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/support.py +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/support.py @@ -1,25 +1,29 @@ #!/usr/bin/env python ############################################################################ -# Copyright (c) 2015-2016 Saint Petersburg State University +# Copyright (c) 2015-2019 Saint Petersburg State University # Copyright (c) 2011-2014 Saint Petersburg Academic University # All Rights Reserved # See file LICENSE for details. ############################################################################ +import glob +import gzip +import itertools +import logging +import math import os +import re +import shutil import stat import sys -import logging -import glob -import re -import gzip import tempfile -import shutil -import options_storage -import itertools +from distutils.version import LooseVersion from os.path import abspath, expanduser, join +import options_storage +from common import SeqIO + # constants to print and detect warnings and errors in logs SPADES_PY_ERROR_MESSAGE = "== Error == " SPADES_PY_WARN_MESSAGE = "== Warning == " @@ -30,21 +34,27 @@ # for removing tmp_dir even if error occurs current_tmp_dir = None +only_old_style_options = True +old_style_single_reads = False + + +def error(err_str, log=None, prefix=SPADES_PY_ERROR_MESSAGE): + binary_name = "SPAdes" -def error(err_str, log=None, dipspades=False, prefix=SPADES_PY_ERROR_MESSAGE): - if not dipspades: - binary_name = "SPAdes" - else: - binary_name = "dipSPAdes" if log: - log.info("\n\n" + prefix + " " + err_str) + log.info("\n\n%s %s" % (prefix, err_str)) log_warnings(log, with_error=True) - log.info("\nIn case you have troubles running " + binary_name + ", you can write to spades.support@cab.spbu.ru") - log.info("Please provide us with params.txt and " + binary_name.lower() + ".log files from the output directory.") + log.info("\nIn case you have troubles running %s, you can write to spades.support@cab.spbu.ru" % binary_name) + log.info("or report an issue on our GitHub repository github.com/ablab/spades") + log.info( + "Please provide us with params.txt and %s.log files from the output directory." 
% binary_name.lower()) else: - sys.stderr.write("\n\n" + prefix + " " + err_str + "\n\n") - sys.stderr.write("\nIn case you have troubles running " + binary_name + ", you can write to spades.support@cab.spbu.ru\n") - sys.stderr.write("Please provide us with params.txt and " + binary_name.lower() + ".log files from the output directory.\n") + sys.stderr.write("\n\n%s %s\n\n" % (prefix, err_str)) + sys.stderr.write( + "\nIn case you have troubles running %s, you can write to spades.support@cab.spbu.ru\n" % binary_name) + sys.stderr.write("or report an issue on our GitHub repository github.com/ablab/spades\n") + sys.stderr.write( + "Please provide us with params.txt and %s.log files from the output directory.\n" % binary_name.lower()) sys.stderr.flush() if current_tmp_dir and os.path.isdir(current_tmp_dir): shutil.rmtree(current_tmp_dir) @@ -53,45 +63,66 @@ def error(err_str, log=None, dipspades=False, prefix=SPADES_PY_ERROR_MESSAGE): def warning(warn_str, log=None, prefix="== Warning == "): if log: - log.info("\n\n" + prefix + " " + warn_str + "\n\n") + log.info("\n\n%s %s\n\n" % (prefix, warn_str)) else: - sys.stdout.write("\n\n" + prefix + " " + warn_str + "\n\n\n") + sys.stdout.write("\n\n%s %s\n\n\n" % (prefix, warn_str)) sys.stdout.flush() def check_python_version(): - if sys.version[0:3] not in options_storage.SUPPORTED_PYTHON_VERSIONS: - error("python version " + sys.version[0:3] + " is not supported!\n" + \ - "Supported versions are " + ", ".join(options_storage.SUPPORTED_PYTHON_VERSIONS)) + def __next_version(version): + components = version.split('.') + for i in reversed(range(len(components))): + if components[i].isdigit(): + components[i] = str(int(components[i]) + 1) + break + return '.'.join(components) + + current_version = sys.version.split()[0] + supported_versions_msg = [] + for supported_versions in options_storage.SUPPORTED_PYTHON_VERSIONS: + major = supported_versions[0] + if '-' in supported_versions: # range + min_inc, max_inc = supported_versions.split('-') + elif supported_versions.endswith('+'): # half open range + min_inc, max_inc = supported_versions[:-1], major + else: # exact version + min_inc = max_inc = supported_versions + max_exc = __next_version(max_inc) + supported_versions_msg.append("Python%s: %s" % (major, supported_versions.replace('+', " and higher"))) + if LooseVersion(min_inc) <= LooseVersion(current_version) < LooseVersion(max_exc): + return True + error("python version %s is not supported!\n" + "Supported versions are %s" % (current_version, ", ".join(supported_versions_msg))) def get_spades_binaries_info_message(): - return "You can obtain SPAdes binaries in one of two ways:" +\ - "\n1. Download them from http://bioinf.spbau.ru/content/spades-download" +\ + return "You can obtain SPAdes binaries in one of two ways:" + \ + "\n1. Download them from http://cab.spbu.ru/software/spades/" + \ "\n2. 
Build source code with ./spades_compile.sh script" def check_binaries(binary_dir, log): - for binary in ["hammer", "ionhammer", "spades", "bwa-spades", "dipspades"]: + for binary in ["spades-hammer", "spades-ionhammer", "spades-core", "spades-bwa"]: binary_path = os.path.join(binary_dir, binary) if not os.path.isfile(binary_path): - error("SPAdes binaries not found: " + binary_path + "\n" + get_spades_binaries_info_message(), log) + error("SPAdes binaries not found: %s\n%s" % (binary_path, get_spades_binaries_info_message()), log) -def check_file_existence(input_filename, message="", log=None, dipspades=False): +def check_file_existence(input_filename, message="", log=None): filename = abspath(expanduser(input_filename)) check_path_is_ascii(filename, message) if not os.path.isfile(filename): - error("file not found: %s (%s)" % (filename, message), log=log, dipspades=dipspades) + error("file not found: %s (%s)" % (filename, message), log=log) options_storage.dict_of_rel2abs[input_filename] = filename return filename -def check_dir_existence(input_dirname, message="", log=None, dipspades=False): +def check_dir_existence(input_dirname, message="", log=None): dirname = abspath(expanduser(input_dirname)) check_path_is_ascii(dirname, message) if not os.path.isdir(dirname): - error("directory not found: %s (%s)" % (dirname, message), log=log, dipspades=dipspades) + error("directory not found: %s (%s)" % (dirname, message), log=log) options_storage.dict_of_rel2abs[input_dirname] = dirname return dirname @@ -101,6 +132,7 @@ def check_path_is_ascii(path, message=""): error("path contains non-ASCII characters: %s (%s)" % (path, message)) +# FIXME: "isfile" for dirname looks strange. def ensure_dir_existence(dirname): if os.path.isfile(dirname): os.remove(dirname) @@ -109,7 +141,7 @@ def ensure_dir_existence(dirname): def recreate_dir(dirname): - if os.path.exists(dirname): + if os.path.isdir(dirname): shutil.rmtree(dirname) os.makedirs(dirname) @@ -120,30 +152,34 @@ def check_files_duplication(filenames, log): error("file %s was specified at least twice" % filename, log) -def check_reads_file_format(filename, message, only_assembler, library_type, log): +def check_reads_file_format(filename, message, only_assembler, iontorrent, library_type, log): if filename in options_storage.dict_of_prefixes: ext = options_storage.dict_of_prefixes[filename] else: ext = os.path.splitext(filename)[1] - if ext.lower() == '.gz': + if ext.lower() == ".gz": pre_ext = os.path.splitext(filename[:-len(ext)])[1] if (pre_ext + ext).lower() in options_storage.ALLOWED_READS_EXTENSIONS: ext = pre_ext + ext - else: # allows ".fastq.1.gz" like extensions + else: # allows ".fastq.1.gz" like extensions pre_pre_ext = os.path.splitext(filename[:-len(pre_ext + ext)])[1] ext = pre_pre_ext + ext if ext.lower() not in options_storage.ALLOWED_READS_EXTENSIONS: - error("file with reads has unsupported format (only " + ", ".join(options_storage.ALLOWED_READS_EXTENSIONS) + - " are supported): %s (%s)" % (filename, message), log) + error("file with reads has unsupported format (only %s are supported): %s (%s)" % + (", ".join(options_storage.ALLOWED_READS_EXTENSIONS), filename, message), log) + + if not iontorrent and ext.lower() in options_storage.IONTORRENT_ONLY_ALLOWED_READS_EXTENSIONS: + error(", ".join(options_storage.IONTORRENT_ONLY_ALLOWED_READS_EXTENSIONS) + + " formats supported only for iontorrent mode: %s (%s)" % (filename, message), log) + if not only_assembler and ext.lower() not in options_storage.BH_ALLOWED_READS_EXTENSIONS and 
\ - library_type not in options_storage.LONG_READS_TYPES: - error("to run read error correction, reads should be in FASTQ format (" + - ", ".join(options_storage.BH_ALLOWED_READS_EXTENSIONS) + - " are supported): %s (%s)" % (filename, message), log) + library_type not in options_storage.LONG_READS_TYPES: + error("to run read error correction, reads should be in FASTQ format (%s are supported): %s (%s)" % + (", ".join(options_storage.BH_ALLOWED_READS_EXTENSIONS), filename, message), log) + if library_type.endswith("contigs") and ext.lower() not in options_storage.CONTIGS_ALLOWED_READS_EXTENSIONS: - error("file with " + library_type + " should be in FASTA format (" + - ", ".join(options_storage.CONTIGS_ALLOWED_READS_EXTENSIONS) + - " are supported): %s (%s)" % (filename, message), log) + error("file with %s should be in FASTA format (%s are supported): %s (%s)" % + (library_type, ", ".join(options_storage.CONTIGS_ALLOWED_READS_EXTENSIONS), filename, message), log) # http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python @@ -171,8 +207,8 @@ def get_available_memory(): try: for line in open(mem_info_filename): if line.startswith(avail_mem_header): - avail_mem = int(line[len(avail_mem_header):].split()[0]) # in kB - avail_mem /= 1024 * 1024 # in GB + avail_mem = int(line[len(avail_mem_header):].split()[0]) # in kB + avail_mem /= 1024 * 1024 # in GB return avail_mem except ValueError: return None @@ -184,7 +220,7 @@ def get_available_memory(): # based on http://stackoverflow.com/questions/196345/how-to-check-if-a-string-in-python-is-in-ascii def is_ascii_string(line): try: - line.encode('ascii') + line.encode("ascii") except UnicodeDecodeError: # python2 return False except UnicodeEncodeError: # python3 @@ -193,14 +229,14 @@ def is_ascii_string(line): return True -def process_readline(line, is_python3=sys.version.startswith('3.')): +def process_readline(line, is_python3=sys.version.startswith("3.")): if is_python3: - return str(line, 'utf-8').rstrip() + return str(line, "utf-8").rstrip() return line.rstrip() def process_spaces(str): - if str.find(" ") != -1: + if " " in str: str = '"' + str + '"' return str @@ -216,7 +252,7 @@ def sys_call(cmd, log=None, cwd=None): proc = subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=cwd) - output = '' + output = "" while not proc.poll(): line = process_readline(proc.stdout.readline()) if line: @@ -236,7 +272,7 @@ def sys_call(cmd, log=None, cwd=None): output += line + "\n" if proc.returncode: - error('system call for: "%s" finished abnormally, err code: %d' % (cmd, proc.returncode), log) + error("system call for: \"%s\" finished abnormally, OS return value: %d" % (cmd, proc.returncode), log) return output @@ -278,11 +314,11 @@ def universal_sys_call(cmd, log, out_filename=None, err_filename=None, cwd=None) if not out_filename: for line in proc.stdout.readlines(): - if line != '': + if line != "": log.info(process_readline(line)) if not err_filename: for line in proc.stderr.readlines(): - if line != '': + if line != "": log.info(process_readline(line)) else: proc.wait() @@ -292,18 +328,18 @@ def universal_sys_call(cmd, log, out_filename=None, err_filename=None, cwd=None) if err_filename: stderr.close() if proc.returncode: - error('system call for: "%s" finished abnormally, err code: %d' % (cmd, proc.returncode), log) + error("system call for: \"%s\" finished abnormally, OS return value: %d" % (cmd, proc.returncode), log) def save_data_to_file(data, file): - output = open(file, 'wb') - 
output.write(data.read()) - output.close() + with open(file, "wb") as output: + output.write(data.read()) + os.chmod(file, stat.S_IWRITE | stat.S_IREAD | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) def get_important_messages_from_log(log_filename, warnings=True): - def already_saved(list_to_check, suffix): # for excluding duplicates (--continue-from may cause them) + def already_saved(list_to_check, suffix): # for excluding duplicates (--continue-from may cause them) for item in list_to_check: if item.endswith(suffix): return True @@ -316,34 +352,33 @@ def already_saved(list_to_check, suffix): # for excluding duplicates (--continue spades_py_message = SPADES_PY_ERROR_MESSAGE spades_message = SPADES_ERROR_MESSAGE - ### for capturing correct warnings in case of continue_mode + # for capturing correct warnings in case of continue_mode if continue_logfile_offset: - continued_log = open(log_filename, 'r') - continued_log.seek(continue_logfile_offset) - continued_stage_phrase = continued_log.readline() - while not continued_stage_phrase.strip(): + with open(log_filename) as continued_log: + continued_log.seek(continue_logfile_offset) continued_stage_phrase = continued_log.readline() - lines_to_check = continued_log.readlines() - continued_log.close() + while not continued_stage_phrase.strip(): + continued_stage_phrase = continued_log.readline() + lines_to_check = continued_log.readlines() - all_lines = open(log_filename, 'r').readlines() + all_lines = open(log_filename).readlines() failed_stage_index = all_lines.index(continued_stage_phrase) lines_to_check = all_lines[:failed_stage_index] + lines_to_check else: - lines_to_check = open(log_filename, 'r').readlines() + lines_to_check = open(log_filename).readlines() spades_py_msgs = [] spades_msgs = [] - IMPORTANT_MESSAGE_SUMMARY_PREFIX = ' * ' + IMPORTANT_MESSAGE_SUMMARY_PREFIX = " * " for line in lines_to_check: if line.startswith(IMPORTANT_MESSAGE_SUMMARY_PREFIX): continue - if line.find(spades_py_message) != -1: + if spades_py_message in line: suffix = line[line.find(spades_py_message) + len(spades_py_message):].strip() - line = line.replace(spades_py_message, '').strip() + line = line.replace(spades_py_message, "").strip() if not already_saved(spades_py_msgs, suffix): spades_py_msgs.append(IMPORTANT_MESSAGE_SUMMARY_PREFIX + line) - elif line.find(spades_message) != -1: + elif spades_message in line: suffix = line[line.find(spades_message) + len(spades_message):].strip() line = line.strip() if not already_saved(spades_msgs, suffix): @@ -353,8 +388,8 @@ def already_saved(list_to_check, suffix): # for excluding duplicates (--continue def get_logger_filename(log): log_file = None - for h in log.__dict__['handlers']: - if h.__class__.__name__ == 'FileHandler': + for h in log.__dict__["handlers"]: + if h.__class__.__name__ == "FileHandler": log_file = h.baseFilename return log_file @@ -363,7 +398,7 @@ def log_warnings(log, with_error=False): log_file = get_logger_filename(log) if not log_file: return False - for h in log.__dict__['handlers']: + for h in log.__dict__["handlers"]: h.flush() spades_py_warns, spades_warns = get_important_messages_from_log(log_file, warnings=True) if spades_py_warns or spades_warns: @@ -374,7 +409,7 @@ def log_warnings(log, with_error=False): warnings_filename = os.path.join(os.path.dirname(log_file), "warnings.log") warnings_handler = logging.FileHandler(warnings_filename, mode='w') log.addHandler(warnings_handler) - #log.info("===== Warnings occurred during SPAdes run =====") + # log.info("===== Warnings occurred 
during SPAdes run =====") log.info("") if spades_py_warns: log.info("=== Pipeline warnings:") @@ -397,22 +432,16 @@ def log_warnings(log, with_error=False): def continue_from_here(log): - if options_storage.continue_mode: - options_storage.continue_mode = False + if options_storage.args.continue_mode: + options_storage.args.continue_mode = False log_filename = get_logger_filename(log) if log_filename: - log_file = open(log_filename, 'r') - log_file.seek(0, 2) # seek to the end of file + log_file = open(log_filename) + log_file.seek(0, 2) # seek to the end of file global continue_logfile_offset continue_logfile_offset = log_file.tell() -def finish_here(log): - log.info("\n======= Skipping the rest of SPAdes pipeline (--stop-after was set to '%s'). " - "You can continue later with --continue or --restart-from options\n" % options_storage.stop_after) - options_storage.run_completed = True - - def get_latest_dir(pattern): def atoi(text): if text.isdigit(): @@ -420,7 +449,7 @@ def atoi(text): return text def natural_keys(text): - return [atoi(c) for c in re.split('(\d+)', text)] + return [atoi(c) for c in re.split("(\d+)", text)] latest_dir = None for dir_to_test in sorted(glob.glob(pattern), key=natural_keys, reverse=True): @@ -434,37 +463,37 @@ def get_tmp_dir(prefix="", base_dir=None): global current_tmp_dir if not base_dir: - base_dir = options_storage.tmp_dir + base_dir = options_storage.args.tmp_dir if not os.path.isdir(base_dir): os.makedirs(base_dir) current_tmp_dir = tempfile.mkdtemp(dir=base_dir, prefix=prefix) return current_tmp_dir -### START for processing YAML files +# START for processing YAML files def get_short_reads_type(option): for short_reads_type in options_storage.SHORT_READS_TYPES.keys(): - if option.startswith('--' + short_reads_type): + if option.startswith("--" + short_reads_type): # additional check to except collisions with LONG_READS_TYPES, e.g. 
--s<#> and --sanger - if option[len('--' + short_reads_type):len('--' + short_reads_type) + 1].isdigit(): + if option[len("--" + short_reads_type):len("--" + short_reads_type) + 1].isdigit(): return short_reads_type return None def get_long_reads_type(option): for long_reads_type in options_storage.LONG_READS_TYPES: - if option.startswith('--') and option in ("--" + long_reads_type): + if option.startswith("--") and option in ("--" + long_reads_type): return long_reads_type return None def is_single_read_type(option): - return option.startswith('--s') and option[3:].isdigit() + return option.startswith("--s") and option[3:].isdigit() def get_lib_type_and_number(option): # defaults for simple -1, -2, -s, --12 options - lib_type = 'pe' + lib_type = "pe" lib_number = 1 if get_short_reads_type(option): @@ -476,22 +505,24 @@ def get_lib_type_and_number(option): def get_data_type(option): - if option.endswith('-12'): - data_type = 'interlaced reads' - elif option.endswith('-1'): - data_type = 'left reads' - elif option.endswith('-2'): - data_type = 'right reads' - elif option.endswith('-s') or is_single_read_type(option) or get_long_reads_type(option): - data_type = 'single reads' - else: # -rf, -ff, -fr - data_type = 'orientation' + if option.endswith("-12"): + data_type = "interlaced reads" + elif option.endswith("-1"): + data_type = "left reads" + elif option.endswith("-2"): + data_type = "right reads" + elif option.endswith("-s") or is_single_read_type(option) or get_long_reads_type(option): + data_type = "single reads" + elif option.endswith("-m") or option.endswith("-merged"): + data_type = "merged reads" + else: # -rf, -ff, -fr + data_type = "orientation" return data_type def get_option_prefix(data): prefix = None - if data.find(':') != -1 and ('.' + data[:data.find(':')]) in options_storage.ALLOWED_READS_EXTENSIONS: + if ':' in data and ('.' + data[:data.find(':')]) in options_storage.ALLOWED_READS_EXTENSIONS: prefix = data[:data.find(':')] data = data[data.find(':') + 1:] return data, prefix @@ -499,25 +530,18 @@ def get_option_prefix(data): def add_to_dataset(option, data, dataset_data): lib_type, lib_number = get_lib_type_and_number(option) + record_id = "%s_%d" % (lib_type, lib_number) data_type = get_data_type(option) - if data_type == 'orientation': + if data_type == "orientation": data = option[-2:] - if lib_type in options_storage.SHORT_READS_TYPES: - record_id = options_storage.MAX_LIBS_NUMBER * sorted(options_storage.SHORT_READS_TYPES.keys()).index(lib_type) \ - + lib_number - 1 - elif lib_type in options_storage.LONG_READS_TYPES: - record_id = options_storage.MAX_LIBS_NUMBER * len(options_storage.SHORT_READS_TYPES.keys()) \ - + options_storage.LONG_READS_TYPES.index(lib_type) - else: - error("can't detect library type from option %s!" % option) - - if not dataset_data[record_id]: # setting default values for a new record + if record_id not in dataset_data: # setting default values for a new record + dataset_data[record_id] = {} if lib_type in options_storage.SHORT_READS_TYPES: - dataset_data[record_id]['type'] = options_storage.SHORT_READS_TYPES[lib_type] + dataset_data[record_id]["type"] = options_storage.SHORT_READS_TYPES[lib_type] else: - dataset_data[record_id]['type'] = lib_type - if data_type.endswith('reads'): + dataset_data[record_id]["type"] = lib_type + if data_type.endswith("reads"): data, prefix = get_option_prefix(data) if prefix: options_storage.dict_of_prefixes[data] = '.' 
+ prefix @@ -532,28 +556,28 @@ def add_to_dataset(option, data, dataset_data): def correct_dataset(dataset_data): # removing empty reads libraries corrected_dataset_data = [] - for reads_library in dataset_data: + for reads_library in dataset_data.values() if isinstance(dataset_data, dict) else dataset_data: if not reads_library: continue has_reads = False has_paired_reads = False for key in reads_library.keys(): - if key.endswith('reads'): + if key.endswith("reads"): has_reads = True - if key in ['interlaced reads', 'left reads', 'right reads']: + if key in ["interlaced reads", "merged reads", "left reads", "right reads"]: has_paired_reads = True break if not has_reads: continue - if not has_paired_reads and reads_library['type'] == 'paired-end': - reads_library['type'] = 'single' - if 'orientation' in reads_library: - del reads_library['orientation'] - if 'orientation' not in reads_library: - if reads_library['type'] == 'paired-end' or reads_library['type'] == 'hq-mate-pairs': - reads_library['orientation'] = 'fr' - elif reads_library['type'] == 'mate-pairs': - reads_library['orientation'] = 'rf' + if not has_paired_reads and reads_library["type"] == "paired-end": + reads_library["type"] = "single" + if "orientation" in reads_library: + del reads_library["orientation"] + if "orientation" not in reads_library: + if reads_library["type"] == "paired-end" or reads_library["type"] == "hq-mate-pairs": + reads_library["orientation"] = "fr" + elif reads_library["type"] == "mate-pairs": + reads_library["orientation"] = "rf" corrected_dataset_data.append(reads_library) return corrected_dataset_data @@ -563,7 +587,7 @@ def relative2abs_paths(dataset_data, dirname): abs_paths_dataset_data = [] for reads_library in dataset_data: for key, value in reads_library.items(): - if key.endswith('reads'): + if key.endswith("reads"): abs_paths_reads = [] for reads_file in value: abs_path = abspath(join(dirname, expanduser(reads_file))) @@ -577,45 +601,90 @@ def relative2abs_paths(dataset_data, dirname): return abs_paths_dataset_data -def check_dataset_reads(dataset_data, only_assembler, log): +def get_reads_length(dataset_data, log, ignored_types, + used_types=options_storage.READS_TYPES_USED_IN_CONSTRUCTION, + num_checked=10 ** 4, diff_len_allowable=25): + max_reads_lenghts = [get_max_reads_length(reads_file, log, num_checked) for reads_file in + get_reads_files(dataset_data, log, ignored_types, used_types)] + + avg_len = sum(max_reads_lenghts) / len(max_reads_lenghts) + for max_len in max_reads_lenghts: + if math.fabs(max_len - avg_len) > diff_len_allowable: + warning("read lengths differ more than allowable. Length: %f. Avg. length: %f." % (max_len, avg_len), log) + reads_length = min(max_reads_lenghts) + log.info("\nReads length: %d\n" % reads_length) + return reads_length + + +def get_primary_max_reads_length(dataset_data, log, ignored_types, used_types, num_checked=10 ** 4): + max_reads_lenghts = [get_max_reads_length(reads_file, log, num_checked) for reads_file in + get_reads_files(dataset_data, log, ignored_types, used_types)] + + reads_length = max(max_reads_lenghts) + log.info("\nReads length: %d\n" % reads_length) + return reads_length + + +def get_reads_files(dataset_data, log, ignored_types, used_types=None): + for reads_library in dataset_data: + if (used_types is not None) and reads_library["type"] not in used_types: + continue + for key, value in reads_library.items(): + if key in ignored_types: + log.info("Files with %s were ignored." 
% key) + continue + elif key.endswith("reads"): + for reads_file in value: + yield reads_file + + +def get_max_reads_length(reads_file, log, num_checked): + if reads_file in options_storage.dict_of_prefixes: + ext = options_storage.dict_of_prefixes[reads_file] + file_type = SeqIO.get_read_file_type(ext) + else: + file_type = SeqIO.get_read_file_type(reads_file) + + if not file_type: + error("incorrect extension of reads file: %s" % reads_file, log) + + max_reads_length = max( + [len(rec) for rec in itertools.islice(SeqIO.parse(SeqIO.Open(reads_file, "r"), file_type), num_checked)]) + log.info("%s: max reads length: %s" % (reads_file, str(max_reads_length))) + return max_reads_length + + +def check_dataset_reads(dataset_data, only_assembler, iontorrent, log): all_files = [] for id, reads_library in enumerate(dataset_data): left_number = 0 right_number = 0 for key, value in reads_library.items(): - if key.endswith('reads'): + if key.endswith("reads"): for reads_file in value: - check_file_existence(reads_file, key + ', library number: ' + str(id + 1) + - ', library type: ' + reads_library['type'], log) - check_reads_file_format(reads_file, key + ', library number: ' + str(id + 1) + - ', library type: ' + reads_library['type'], only_assembler, reads_library['type'], log) + check_file_existence(reads_file, + "%s, library number: %d, library type: %s" % + (key, id + 1, reads_library["type"]), log) + check_reads_file_format(reads_file, "%s, library number: %d, library type: %s" % + (key, id + 1, reads_library["type"]), only_assembler, iontorrent, + reads_library["type"], log) all_files.append(reads_file) - if key == 'left reads': + if key == "left reads": left_number = len(value) - elif key == 'right reads': + elif key == "right reads": right_number = len(value) if left_number != right_number: - error('the number of files with left paired reads is not equal to the number of files ' - 'with right paired reads (library number: ' + str(id + 1) + - ', library type: ' + reads_library['type'] + ')!', log) + error("the number of files with left paired reads is not equal to the number of files " + "with right paired reads (library number: %d, library type: %s)!" 
% + (id + 1, reads_library["type"]), log) if not len(all_files): - error("You should specify at least one file with reads!", log) + error("you should specify at least one file with reads!", log) check_files_duplication(all_files, log) -def check_single_reads_in_options(options, log): - only_old_style_options = True - old_style_single_reads = False - for option in options: - if option not in options_storage.reads_options: - continue - if option in options_storage.OLD_STYLE_READS_OPTIONS: - if option == '-s': - old_style_single_reads = True - else: - only_old_style_options = False +def check_single_reads_in_options(log): if not only_old_style_options and old_style_single_reads: - warning("It is recommended to specify single reads with --pe<#>-s, --mp<#>-s, --hqmp<#>-s, " + warning("it is recommended to specify single reads with --pe<#>-s, --mp<#>-s, --hqmp<#>-s, " "or --s<#> option instead of -s!", log) @@ -624,7 +693,7 @@ def get_lib_ids_by_type(dataset_data, types): types = [types] lib_ids = [] for id, reads_library in enumerate(dataset_data): - if reads_library['type'] in types: + if reads_library["type"] in types: lib_ids.append(id) return lib_ids @@ -654,221 +723,62 @@ def dataset_is_empty(dataset_data): def dataset_has_gzipped_reads(dataset_data): for reads_library in dataset_data: for key in reads_library: - if key.endswith('reads'): + if key.endswith("reads"): for reads_file in reads_library[key]: - if reads_file.endswith('.gz'): + if reads_file.endswith(".gz"): return True return False def dataset_has_interlaced_reads(dataset_data): for reads_library in dataset_data: - if 'interlaced reads' in reads_library: + if "interlaced reads" in reads_library: return True return False def dataset_has_additional_contigs(dataset_data): for reads_library in dataset_data: - if reads_library['type'].endswith('contigs'): + if reads_library["type"].endswith("contigs"): return True return False def dataset_has_nxmate_reads(dataset_data): for reads_library in dataset_data: - if reads_library['type'] == 'nxmate': + if reads_library["type"] == "nxmate": return True return False -def process_Ns_in_additional_contigs(dataset_data, dst, log): - new_dataset_data = list() - for reads_library in dataset_data: - new_reads_library = dict(reads_library) - if reads_library["type"].endswith("contigs"): - new_entry = [] - for contigs in reads_library["single reads"]: - if contigs in options_storage.dict_of_prefixes: - ext = options_storage.dict_of_prefixes[contigs] - basename = contigs - else: - basename, ext = os.path.splitext(contigs) - gzipped = False - if ext.endswith('.gz'): - gzipped = True - if contigs not in options_storage.dict_of_prefixes: - basename, _ = os.path.splitext(basename) - modified, new_fasta = break_scaffolds(contigs, options_storage.THRESHOLD_FOR_BREAKING_ADDITIONAL_CONTIGS, - replace_char='A', gzipped=gzipped) - if modified: - if not os.path.isdir(dst): - os.makedirs(dst) - new_filename = os.path.join(dst, os.path.basename(basename) + '.fasta') - if contigs in options_storage.dict_of_prefixes: - del options_storage.dict_of_prefixes[contigs] - log.info("== Processing additional contigs (%s): changing Ns to As and " - "splitting by continues (>= %d) Ns fragments (results are in %s directory)" % (contigs, - options_storage.THRESHOLD_FOR_BREAKING_ADDITIONAL_CONTIGS, dst)) - write_fasta(new_filename, new_fasta) - new_entry.append(new_filename) - else: - new_entry.append(contigs) - new_reads_library["single reads"] = new_entry - new_dataset_data.append(new_reads_library) - return new_dataset_data - 
- -def split_interlaced_reads(dataset_data, dst, log): - def write_single_read(in_file, out_file, read_name=None, is_fastq=False, is_python3=False): - if read_name is None: - read_name = process_readline(in_file.readline(), is_python3) - if not read_name: - return '' # no next read - read_value = process_readline(in_file.readline(), is_python3) - line = process_readline(in_file.readline(), is_python3) - fpos = in_file.tell() - while (is_fastq and not line.startswith('+')) or (not is_fastq and not line.startswith('>')): - read_value += line - line = process_readline(in_file.readline(), is_python3) - if not line: - if fpos == in_file.tell(): - break - fpos = in_file.tell() - out_file.write(read_name + '\n') - out_file.write(read_value + '\n') - - if is_fastq: - read_quality = process_readline(in_file.readline(), is_python3) - line = process_readline(in_file.readline(), is_python3) - while not line.startswith('@'): - read_quality += line - line = process_readline(in_file.readline(), is_python3) - if not line: - if fpos == in_file.tell(): - break - fpos = in_file.tell() - if len(read_value) != len(read_quality): - error('The length of sequence and quality lines should be the same! ' - 'Check read %s (SEQ length is %d, QUAL length is %d)' % - (read_name, len(read_value), len(read_quality)), log) - out_file.write('+\n') - out_file.write(read_quality + '\n') - return line # next read name or empty string - - new_dataset_data = list() - for reads_library in dataset_data: - new_reads_library = dict(reads_library) - for key, value in reads_library.items(): - if key == 'interlaced reads': - if 'left reads' not in new_reads_library: - new_reads_library['left reads'] = [] - new_reads_library['right reads'] = [] - for interlaced_reads in value: - if interlaced_reads in options_storage.dict_of_prefixes: - ext = options_storage.dict_of_prefixes[interlaced_reads] - else: - ext = os.path.splitext(interlaced_reads)[1] - was_compressed = False - if ext.endswith('.gz'): - was_compressed = True - input_file = gzip.open(interlaced_reads, 'r') - ungzipped = os.path.splitext(interlaced_reads)[0] - out_basename, ext = os.path.splitext(os.path.basename(ungzipped)) - else: - input_file = open(interlaced_reads, 'r') - out_basename, ext = os.path.splitext(os.path.basename(interlaced_reads)) - - if interlaced_reads in options_storage.dict_of_prefixes: - ext = options_storage.dict_of_prefixes[interlaced_reads] - if ext.lower().startswith('.fq') or ext.lower().startswith('.fastq'): - is_fastq = True - ext = '.fastq' - else: - is_fastq = False - ext = '.fasta' - - out_left_filename = os.path.join(dst, out_basename + "_1" + ext) - out_right_filename = os.path.join(dst, out_basename + "_2" + ext) - - if not (options_storage.continue_mode and os.path.isfile(out_left_filename) and os.path.isfile(out_right_filename)): - options_storage.continue_mode = False - log.info("== Splitting " + interlaced_reads + " into left and right reads (in " + dst + " directory)") - out_files = [open(out_left_filename, 'w'), open(out_right_filename, 'w')] - i = 0 - next_read_name = write_single_read(input_file, out_files[i], None, is_fastq, - sys.version.startswith('3.') and was_compressed) - while next_read_name: - i = (i + 1) % 2 - next_read_name = write_single_read(input_file, out_files[i], next_read_name, is_fastq, - sys.version.startswith('3.') and was_compressed) - if i == 0: - error("The number of reads in file with interlaced reads (" + interlaced_reads + ") should be EVEN!", log) - out_files[0].close() - out_files[1].close() - 
input_file.close() - new_reads_library['left reads'].append(out_left_filename) - new_reads_library['right reads'].append(out_right_filename) - if interlaced_reads in options_storage.dict_of_prefixes: - del options_storage.dict_of_prefixes[interlaced_reads] - del new_reads_library['interlaced reads'] - new_dataset_data.append(new_reads_library) - return new_dataset_data - - -def process_nxmate_reads(dataset_data, dst, log): - try: - import lucigen_nxmate - new_dataset_data = list() - for reads_library in dataset_data: - new_reads_library = dict(reads_library) - if new_reads_library['type'] == 'nxmate': - raw_left_reads = new_reads_library['left reads'] - raw_right_reads = new_reads_library['right reads'] - new_reads_library['left reads'] = [] - new_reads_library['right reads'] = [] - new_reads_library['single reads'] = [] - for id, left_reads_fpath in enumerate(raw_left_reads): - right_reads_fpath = raw_right_reads[id] - processed_left_reads_fpath, processed_right_reads_fpath, single_reads_fpath = \ - lucigen_nxmate.process_reads(left_reads_fpath, right_reads_fpath, dst, log) - new_reads_library['left reads'].append(processed_left_reads_fpath) - new_reads_library['right reads'].append(processed_right_reads_fpath) - new_reads_library['single reads'].append(single_reads_fpath) - new_reads_library['type'] = 'mate-pairs' - new_reads_library['orientation'] = 'fr' - new_dataset_data.append(new_reads_library) - return new_dataset_data - except ImportError: - error("Can't process Lucigen NxMate reads! lucigen_nxmate.py is missing!", log) - - -def pretty_print_reads(dataset_data, log, indent=' '): - READS_TYPES = ['left reads', 'right reads', 'interlaced reads', 'single reads'] +def pretty_print_reads(dataset_data, log, indent=" "): + READS_TYPES = ["left reads", "right reads", "interlaced reads", "single reads", "merged reads"] for id, reads_library in enumerate(dataset_data): - log.info(indent + 'Library number: ' + str(id + 1) + ', library type: ' + reads_library['type']) - if 'orientation' in reads_library: - log.info(indent + ' orientation: ' + reads_library['orientation']) + log.info(indent + "Library number: %d, library type: %s" % (id + 1, reads_library["type"])) + if "orientation" in reads_library: + log.info("%s orientation: %s" % (indent, reads_library["orientation"])) for reads_type in READS_TYPES: if reads_type not in reads_library: - value = 'not specified' + value = "not specified" else: value = str(reads_library[reads_type]) - log.info(indent + ' ' + reads_type + ': ' + value) -### END: for processing YAML files + log.info("%s %s: %s" % (indent, reads_type, value)) + + +# END: for processing YAML files def read_fasta(filename, gzipped=False): res_name = [] res_seq = [] first = True - seq = '' + seq = "" if gzipped: file_handler = gzip.open(filename) else: file_handler = open(filename) for line in file_handler: - line = process_readline(line, gzipped and sys.version.startswith('3.')) + line = process_readline(line, gzipped and sys.version.startswith("3.")) if not line: continue if line[0] == '>': @@ -877,7 +787,7 @@ def read_fasta(filename, gzipped=False): res_seq.append(seq) else: first = False - seq = '' + seq = "" else: seq += line.strip() res_seq.append(seq) @@ -886,12 +796,11 @@ def read_fasta(filename, gzipped=False): def write_fasta(filename, fasta): - outfile = open(filename, 'w') - for name, seq in fasta: - outfile.write(name + '\n') - for i in range(0, len(seq), 60): - outfile.write(seq[i : i + 60] + '\n') - outfile.close() + with open(filename, 'w') as outfile: + for name, 
seq in fasta: + outfile.write(name + '\n') + for i in range(0, len(seq), 60): + outfile.write(seq[i: i + 60] + '\n') def break_scaffolds(input_filename, threshold, replace_char="N", gzipped=False): @@ -928,16 +837,16 @@ def comp(letter): def rev_comp(seq): - return ''.join(itertools.imap(comp, seq[::-1])) + return "".join(itertools.imap(comp, seq[::-1])) def get_contig_id(s): values = s.split("_") if len(values) < 2 or (values[0] != ">NODE" and values[0] != "NODE"): - warning("Contig %s has unknown ID format" % (s)) + warning("contig %s has unknown ID format" % (s)) return None - if s.find("'") != -1: - return (values[1] + "'") + if "'" in s: + return values[1] + "'" return values[1] diff --git a/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/truspades/__init__.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/truspades/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/truspades/barcode_extraction.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/truspades/barcode_extraction.py similarity index 92% rename from src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/truspades/barcode_extraction.py rename to src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/truspades/barcode_extraction.py index 15efbce..1eb7fa4 100644 --- a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/truspades/barcode_extraction.py +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/truspades/barcode_extraction.py @@ -9,8 +9,6 @@ import logging from id_generation import generate_ids -from string_dist_utils import lcs, dist - __author__ = 'anton' @@ -30,6 +28,7 @@ def add_ps(self, prefix, suffix): def __str__(self): return self.id + " " + " ".join([" ".join(lib) for lib in self.libs]) + def RemoveLabel(s, code, code_range): for pos in range(len(s)): if s[pos:].startswith(code): @@ -40,38 +39,45 @@ def RemoveLabel(s, code, code_range): return s[:pos] + s[new_pos + len(tmp):] return s + def NormalizeR(s): - return RemoveLabel(s, "R", [1,2]) + return RemoveLabel(s, "R", [1, 2]) + def NormalizeLR(s): s = NormalizeR(s) return RemoveLabel(s, "L", range(1, 20)) + def check_int_ids(ids): for id in ids: if not id[1].isdigit(): return False return True + def generate_barcode_list(barcodes): ids = list(zip(barcodes, generate_ids(barcodes))) if check_int_ids(ids): ids = sorted(ids, key=lambda barcode: int(barcode[1])) return [(bid, "BC_" + short_id) for bid, short_id in ids] + def Normalize(file_path): return NormalizeLR(os.path.basename(file_path)) + def GroupBy(norm, l): result = dict() for line in l: key = norm(line) - if not key in result: + if key not in result: result[key] = [] result[key].append(line) return result -def CheckSameSize(iter, size = -1): + +def CheckSameSize(iter, size=-1): for vl in iter: if size == -1: size = len(vl) @@ -79,11 +85,13 @@ def CheckSameSize(iter, size = -1): return False return True -#todo: write better code + +# todo: write better code def ExtractBarcodes(dirs): files = [] for dir in dirs: - for file in [os.path.abspath(os.path.join(dir, file)) for file in os.listdir(dir) if os.path.isfile(os.path.join(dir, file))]: + for file in [os.path.abspath(os.path.join(dir, file)) for file in os.listdir(dir) if + os.path.isfile(os.path.join(dir, file))]: files.append(file) barcode_dict = GroupBy(Normalize, files) if not CheckSameSize(barcode_dict.values()): @@ -95,7 +103,8 @@ def ExtractBarcodes(dirs): short_barcodes = generate_barcode_list(list(barcode_dict.keys())) return [Barcode(short, 
barcode_dict[bid]) for bid, short in short_barcodes] -def ReadDataset(file, log = logging.getLogger("ReadDataset")): + +def ReadDataset(file, log=logging.getLogger("ReadDataset")): log.info("Reading dataset from " + file + "\n") if os.path.exists(file) and os.path.isfile(file): result = [] @@ -117,7 +126,7 @@ def ReadDataset(file, log = logging.getLogger("ReadDataset")): log.info("Error: Dataset file does not exist\n" + file + "\n") sys.exit(1) + def print_dataset(dataset, output_file, log): log.info("Printing dataset to " + output_file) open(output_file, "w").write("\n".join([str(line).strip() for line in dataset]) + "\n") - diff --git a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/truspades/break_by_coverage.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/truspades/break_by_coverage.py similarity index 92% rename from src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/truspades/break_by_coverage.py rename to src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/truspades/break_by_coverage.py index b412bc0..0c11826 100644 --- a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/truspades/break_by_coverage.py +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/truspades/break_by_coverage.py @@ -7,14 +7,10 @@ ############################################################################ -import SeqIO -from SeqIO import SeqRecord - -import sys import os import shutil -import sam_parser -import itertools + +from common import SeqIO def ConstructCoverage(sam, contigs, k): @@ -36,6 +32,7 @@ def ConstructCoverage(sam, contigs, k): reads = [] return cov + def ConstructCoverageSingle(sam, contigs, k): cov = dict() for contig in range(len(contigs)): @@ -47,6 +44,7 @@ def ConstructCoverageSingle(sam, contigs, k): cov[rec.tid][rec.pos + rec.alen - k] -= 1 return cov + def OutputHist(cov, contigs, folder): if os.path.exists(folder): shutil.rmtree(folder) @@ -59,6 +57,7 @@ def OutputHist(cov, contigs, folder): f.write(str(i) + " " + str(cur) + "\n") f.close() + def ConstructSimpleCoverage(sam, contigs, k): simple_cov = dict() for contig in range(len(contigs)): @@ -69,6 +68,7 @@ def ConstructSimpleCoverage(sam, contigs, k): simple_cov[rec.tid][rec.pos + rec.alen] -= 1 return simple_cov + def BreakContig(cov, k, min0): l = len(cov) - 1 if l < 2 * k: @@ -91,6 +91,7 @@ def BreakContig(cov, k, min0): result.append([prev_break, min(l, l - cur_len0 + k)]) return result + class ContigBreaker: def __init__(self, contigs, sam, k, min0): self.part_list_ = [] @@ -105,8 +106,8 @@ def __init__(self, contigs, sam, k, min0): def Break(self, contig): result = [] - #print contig.id - #print self.sam.gettid(contig.id) + # print contig.id + # print self.sam.gettid(contig.id) for part in self.part_list_[self.sam.gettid(contig.id)]: result.append(contig.subseq(part[0], part[1])) return result @@ -118,6 +119,7 @@ def OutputBroken(self, output_file): SeqIO.write(subcontig, output, "fasta") output.close() + class PatternBreaker: def __init__(self, pattern, rc_pattern, max_cut): self.pattern = pattern @@ -153,22 +155,23 @@ def FindRightPos(self, seq): def Break(self, contig): if len(contig) < 2 * self.max_cut: return [] - l,r = self.FindLeftPos(contig.seq), self.FindRightPos(contig.seq) + l, r = self.FindLeftPos(contig.seq), self.FindRightPos(contig.seq) return [contig.subseq(l, r)] + class NBreaker: def __init__(self, min_N): self.min_N = min_N def Break(self, contig): result = [] - last_break = 0; + last_break = 0 pos = 0 - while(pos < len(contig) and contig[pos] == 'N'): + while (pos < len(contig) 
and contig[pos] == 'N'): pos += 1 - while pos <len(contig): + while pos < len(contig): rpos = pos while rpos < len(contig) and contig[rpos] == 'N': rpos += 1 if rpos - pos >= self.min_N: result.append(contig.subseq(last_break, pos)) @@ -178,5 +181,5 @@ def Break(self, contig): result.append(contig.subseq(last_break, len(contig))) return result -#if __name__ == '__main__': +# if __name__ == '__main__': # ContigBreaker(sys.argv[1], sys.argv[3], int(sys.argv[4]), int(sys.argv[5])).OutputBroken(sys.argv[2]) diff --git a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/truspades/generate_quality.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/truspades/generate_quality.py similarity index 90% rename from src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/truspades/generate_quality.py rename to src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/truspades/generate_quality.py index 77d5000..7204cc3 100644 --- a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/truspades/generate_quality.py +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/truspades/generate_quality.py @@ -5,14 +5,12 @@ ############################################################################ import re -import sys -import itertools -import sam_parser pattern = re.compile('([0-9]*)([MIDNSHP])') -def parse(cigar, len, pos = 0): - if cigar == "=" : + +def parse(cigar, len, pos=0): + if cigar == "=": for i in range(len): yield (i, i + pos) return @@ -35,8 +33,9 @@ def parse(cigar, len, pos = 0): elif c in "IS": cur += n + def CollectQuality(contigs, sam): - qual = [[[0,0] for i in range(len(contig))] for contig in contigs] + qual = [[[0, 0] for i in range(len(contig))] for contig in contigs] for rec in sam: if rec.proper_alignment: for seq_pos, contig_pos in parse(rec.cigar, rec.alen, rec.pos - 1): @@ -45,6 +44,7 @@ def CollectQuality(contigs, sam): qual[rec.tid][contig_pos][0] += ord(rec.qual[seq_pos]) return qual + def CountContigQuality(contigs, qual): for i in range(len(contigs)): cnt = 0 @@ -61,4 +61,3 @@ def CountContigQuality(contigs, qual): def GenerateQuality(contigs, sam): qual = CollectQuality(contigs, sam) CountContigQuality(contigs, qual) - diff --git a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/truspades/id_generation.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/truspades/id_generation.py similarity index 99% rename from src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/truspades/id_generation.py rename to src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/truspades/id_generation.py index e6b45d5..cb7b1d6 100644 --- a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/truspades/id_generation.py +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/truspades/id_generation.py @@ -8,7 +8,6 @@ __author__ = 'anton' -import sys def CommonPrefix(s1, s2): n = 0 @@ -16,12 +15,14 @@ def CommonPrefix(s1, s2): n += 1 return n + def CommonSuffix(s1, s2): n = 0 while n < len(s1) and n < len(s2) and s1[-n - 1] == s2[-n - 1]: n += 1 return n + def FindCommon(lines): if len(lines) == 0: return 0, 0 @@ -35,12 +36,14 @@ def FindCommon(lines): min_len = min(min_len, len(line)) return left, min(right, min_len - left) + def generate_ids(lines): l, r = FindCommon(lines) lines = [line[l: len(line) - r] for line in lines] id_candidates = generate_id_candidates(lines) return select_ids_from_candidates(id_candidates) + def select_ids_from_candidates(id_candidates): if len(id_candidates) == 1: return [""] diff --git a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/truspades/launch_options.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/truspades/launch_options.py similarity index 94% rename
from src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/truspades/launch_options.py rename to src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/truspades/launch_options.py index 1a7ad18..78b3a24 100644 --- a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/truspades/launch_options.py +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/truspades/launch_options.py @@ -8,8 +8,10 @@ import getopt import os import sys + import options_storage + class Options: def set_default_options(self): self.threads = 8 @@ -85,7 +87,8 @@ def __init__(self, argv, bin, home, version): elif key == "--help-hidden": print_usage_and_exit(0, self.version, show_hidden=True) if not self.mode in self.possible_modes: - sys.stderr.write("Error: --do parameter can only have one of the following values: " + ", ".join(self.possible_modes) + "\n") + sys.stderr.write("Error: --do parameter can only have one of the following values: " + ", ".join( + self.possible_modes) + "\n") print_usage_and_exit(1, self.version) if None == self.output_dir or os.path.isfile(self.output_dir): sys.stderr.write("Error: Please provide output directory\n") @@ -119,11 +122,12 @@ def print_usage_and_exit(code, version, show_hidden=False): sys.stderr.write("--construct-dataset\t\tparse dataset from input folder" + "\n") sys.stderr.write("" + "\n") sys.stderr.write("Input options:" + "\n") - sys.stderr.write("--input-dir\t\tdirectory with input data. Note that the directory should contain only files with reads. This option can be used several times to provide several input directories." + "\n") + sys.stderr.write( + "--input-dir\t\tdirectory with input data. Note that the directory should contain only files with reads. This option can be used several times to provide several input directories." 
+ "\n") sys.stderr.write("--dataset\t\t\tfile with dataset description" + "\n") if show_hidden: pass - #ToDo + # ToDo # sys.stderr.write("" + "\n") # sys.stderr.write("Output options:" + "\n") # sys.stderr.write("--print-dataset\tprints file with dataset generated after analysis of input directory contents" + "\n") diff --git a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/truspades/moleculo_filter_contigs.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/truspades/moleculo_filter_contigs.py similarity index 85% rename from src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/truspades/moleculo_filter_contigs.py rename to src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/truspades/moleculo_filter_contigs.py index 6a430d3..2abd611 100644 --- a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/truspades/moleculo_filter_contigs.py +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/truspades/moleculo_filter_contigs.py @@ -7,12 +7,6 @@ ############################################################################ -import sam_parser - -import SeqIO - -import sys - class PatternContigFilter: def __init__(self, contigs, sam, pattern, rc_pattern): self.sam = sam @@ -23,7 +17,8 @@ def __init__(self, contigs, sam, pattern, rc_pattern): if len(reads) == 2: left_sequence = str(reads[0].seq.upper()) right_sequence = str(reads[1].seq.upper()) - if left_sequence.find(pattern) != -1 or right_sequence.find(rc_pattern) != -1 or right_sequence.find(pattern) != -1 or left_sequence.find(rc_pattern) != -1: + if (pattern in left_sequence) or (rc_pattern in right_sequence) or \ + (pattern in right_sequence) or (rc_pattern in left_sequence): if not reads[0].is_unmapped: self.filter[reads[0].tid] = True if not reads[1].is_unmapped: @@ -33,6 +28,7 @@ def __init__(self, contigs, sam, pattern, rc_pattern): def Filter(self, contig): return self.filter[self.sam.gettid(contig.id)] + class ContigLengthFilter: def __init__(self, min_length): self.min_length = min_length diff --git a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/truspades/moleculo_postprocessing.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/truspades/moleculo_postprocessing.py similarity index 97% rename from src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/truspades/moleculo_postprocessing.py rename to src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/truspades/moleculo_postprocessing.py index e923d57..e2cc321 100644 --- a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/truspades/moleculo_postprocessing.py +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/truspades/moleculo_postprocessing.py @@ -4,16 +4,16 @@ # See file LICENSE for details. ############################################################################ -import moleculo_filter_contigs import break_by_coverage -import SeqIO -import sys import generate_quality -import sam_parser +import moleculo_filter_contigs +from common import SeqIO +from common import sam_parser pattern = "TACGCTTGCAT" rc_pattern = "ATGCAAGCGTA" + def SplitAndFilter(contigs, coverage_breaker, length_filter, n_breaker, pattern_breaker, pattern_filter): result = [] for contig in contigs: @@ -49,4 +49,3 @@ def moleculo_postprocessing(contigs_file, output_file, sam_files, log): OutputResults(output_file, "fasta", result) OutputResults(output_file, "fastq", result) log.info("===== Postprocessing finished. 
Results can be found in " + output_file + ".fastq") - diff --git a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/truspades/reference_construction.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/truspades/reference_construction.py similarity index 91% rename from src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/truspades/reference_construction.py rename to src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/truspades/reference_construction.py index 511fc1a..2c4768c 100644 --- a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/truspades/reference_construction.py +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/truspades/reference_construction.py @@ -7,20 +7,16 @@ ############################################################################ -# wraps samtools, used to read SAM/BAM -#import pysam3 as pysam import logging import os import shutil import sys -import itertools -import traceback -import SeqIO -import alignment -import parallel_launcher -import sam_parser import support +from common import SeqIO +from common import alignment +from common import sam_parser + # for fasta/q I/O @@ -44,7 +40,7 @@ def Join(self, rec): def __str__(self): return str(self.rname) + "(" + str(self.Coverage()) + "): [" + str(self.left) + ", " + str(self.right) + "]" - + def __cmp__(self, other): if other == None: return -1 @@ -57,6 +53,7 @@ def __cmp__(self, other): return self.left - other.left return self.right - other.right + def CollectParts(recs, step, mincov, minlen): res = [] cur = None @@ -71,10 +68,12 @@ def CollectParts(recs, step, mincov, minlen): res.append(cur) return res + def PrintParts(recs, out): for rec in recs: out.write(str(rec) + "\n") + def ReadReference(file): result = dict() for rec in SeqIO.parse_fasta(open(file, "r")): @@ -83,8 +82,8 @@ def ReadReference(file): def ConstructSubreferenceFromSam(sam_files): - #todo: make online - #todo: use config + # todo: make online + # todo: use config recs = [] for sam_file in sam_files: sam = sam_parser.Samfile(sam_file) @@ -103,11 +102,12 @@ def PrintResults(recs, reference, references_file, coordinates_file): for rec in recs: aln.write(str(rec) + "\n") sequence = reference[rec.rname][rec.left:rec.right] - rec_id = str(rec.rname) + "_(" + str(rec.left) + "-" + str(rec.right)+")" + rec_id = str(rec.rname) + "_(" + str(rec.left) + "-" + str(rec.right) + ")" SeqIO.write(SeqIO.SeqRecord(sequence, rec_id), fasta, "fasta") aln.close() fasta.close() + def PrintAll(subreferences, reference, output_dir): references_dir = os.path.join(output_dir, "references") coordinates_dir = os.path.join(output_dir, "coordinates") @@ -115,9 +115,11 @@ def PrintAll(subreferences, reference, output_dir): os.mkdir(coordinates_dir) for id, subreference in subreferences: if subreference != None: - PrintResults(subreference, reference, os.path.join(references_dir, str(id) + ".fasta"), os.path.join(coordinates_dir, str(id) + ".aln")) + PrintResults(subreference, reference, os.path.join(references_dir, str(id) + ".fasta"), + os.path.join(coordinates_dir, str(id) + ".aln")) + -def AlignToReference(datasets, sam_dir, bwa_command, log, index, threads = 1): +def AlignToReference(datasets, sam_dir, bwa_command, log, index, threads=1): if os.path.exists(sam_dir): shutil.rmtree(sam_dir) os.makedirs(sam_dir) @@ -127,6 +129,7 @@ def AlignToReference(datasets, sam_dir, bwa_command, log, index, threads = 1): sam_files = alignment.align_bwa_pe_libs(bwa_command, index, reads, dataset_work_dir, log, threads) yield (dataset_id, sam_files) + def 
ReadDataset(dataset_file): dataset_file = dataset_file.split(":") dataset_lines = [line.strip().split(" ") for line in open(dataset_file[0], "r").readlines() if line.strip()] @@ -135,8 +138,9 @@ def ReadDataset(dataset_file): datasets = filter(lambda x, y: x.startswith(dataset_file[1]), datasets) return datasets -def ConstructSubreferences(datasets, reference_file, output_dir, index = None, threads = 1, log = None): - bwa_command = "bin/bwa-spades" + +def ConstructSubreferences(datasets, reference_file, output_dir, index=None, threads=1, log=None): + bwa_command = "bin/spades-bwa" if log == None: log = logging.getLogger('reference_construction') log.setLevel(logging.INFO) @@ -158,14 +162,17 @@ def ConstructSubreferences(datasets, reference_file, output_dir, index = None, t support.recreate_dir(subreference_dir) support.recreate_dir(filtered_dir) log.info("Constructing subreferences") - subreferences_list = [(barcode_id, ConstructSubreferenceFromSam(barcode_sam)) for barcode_id, barcode_sam in sam_files] + subreferences_list = [(barcode_id, ConstructSubreferenceFromSam(barcode_sam)) for barcode_id, barcode_sam in + sam_files] log.info("Reading reference") reference = ReadReference(reference_file) log.info("Printing output") PrintAll([(barcode, filtered) for barcode, (filtered, subreference) in subreferences_list], reference, filtered_dir) - PrintAll([(barcode, subreference) for barcode, (filtered, subreference) in subreferences_list], reference, subreference_dir) + PrintAll([(barcode, subreference) for barcode, (filtered, subreference) in subreferences_list], reference, + subreference_dir) log.info("Subreference construction finished. See results in " + output_dir) + if __name__ == '__main__': # ConstructSubreference(sys.argv[1], "r", ReadReference(sys.argv[2]), sys.argv[3]) ConstructSubreferences(ReadDataset(sys.argv[1]), sys.argv[2], sys.argv[3], sys.argv[4], int(sys.argv[5])) diff --git a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/truspades/string_dist_utils.py b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/truspades/string_dist_utils.py similarity index 96% rename from src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/truspades/string_dist_utils.py rename to src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/truspades/string_dist_utils.py index aa481b5..d6ff654 100644 --- a/src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/truspades/string_dist_utils.py +++ b/src/SPAdes-3.14.0-Linux/share/spades/spades_pipeline/truspades/string_dist_utils.py @@ -5,7 +5,7 @@ ############################################################################ __author__ = 'anton' -import sys + def calculate_dist_table(s1, s2): n1 = len(s1) @@ -22,6 +22,7 @@ def calculate_dist_table(s1, s2): t.append(t_line) return t + def calculate_lcs_table(s1, s2): n1 = len(s1) n2 = len(s2) @@ -37,13 +38,14 @@ def calculate_lcs_table(s1, s2): t.append(t_line) return t + def lcs(s1, s2): t = calculate_dist_table(s1, s2) i = len(s1) j = len(s2) res = "" while i > 0 and j > 0: - if t[i][j] == t[i - 1][j -1] + 1: + if t[i][j] == t[i - 1][j - 1] + 1: i -= 1 j -= 1 elif t[i][j] == t[i - 1][j] + 1: @@ -56,6 +58,7 @@ def lcs(s1, s2): res = s1[i] + res return res + def dist(s1, s2): return calculate_dist_table(s1, s2)[len(s1)][len(s2)] diff --git a/src/SPAdes-3.10.1-Linux/share/spades/test_dataset/ecoli_1K_1.fq.gz b/src/SPAdes-3.14.0-Linux/share/spades/test_dataset/ecoli_1K_1.fq.gz similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/test_dataset/ecoli_1K_1.fq.gz rename to 
src/SPAdes-3.14.0-Linux/share/spades/test_dataset/ecoli_1K_1.fq.gz diff --git a/src/SPAdes-3.10.1-Linux/share/spades/test_dataset/ecoli_1K_2.fq.gz b/src/SPAdes-3.14.0-Linux/share/spades/test_dataset/ecoli_1K_2.fq.gz similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/test_dataset/ecoli_1K_2.fq.gz rename to src/SPAdes-3.14.0-Linux/share/spades/test_dataset/ecoli_1K_2.fq.gz diff --git a/src/SPAdes-3.10.1-Linux/share/spades/test_dataset/genes_1K.gff b/src/SPAdes-3.14.0-Linux/share/spades/test_dataset/genes_1K.gff similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/test_dataset/genes_1K.gff rename to src/SPAdes-3.14.0-Linux/share/spades/test_dataset/genes_1K.gff diff --git a/src/SPAdes-3.10.1-Linux/share/spades/test_dataset/genes_1K.txt b/src/SPAdes-3.14.0-Linux/share/spades/test_dataset/genes_1K.txt similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/test_dataset/genes_1K.txt rename to src/SPAdes-3.14.0-Linux/share/spades/test_dataset/genes_1K.txt diff --git a/src/SPAdes-3.10.1-Linux/share/spades/test_dataset/operons_1K.gff b/src/SPAdes-3.14.0-Linux/share/spades/test_dataset/operons_1K.gff similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/test_dataset/operons_1K.gff rename to src/SPAdes-3.14.0-Linux/share/spades/test_dataset/operons_1K.gff diff --git a/src/SPAdes-3.10.1-Linux/share/spades/test_dataset/operons_1K.txt b/src/SPAdes-3.14.0-Linux/share/spades/test_dataset/operons_1K.txt similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/test_dataset/operons_1K.txt rename to src/SPAdes-3.14.0-Linux/share/spades/test_dataset/operons_1K.txt diff --git a/src/SPAdes-3.10.1-Linux/share/spades/test_dataset/reference_1K.fa.gz b/src/SPAdes-3.14.0-Linux/share/spades/test_dataset/reference_1K.fa.gz similarity index 88% rename from src/SPAdes-3.10.1-Linux/share/spades/test_dataset/reference_1K.fa.gz rename to src/SPAdes-3.14.0-Linux/share/spades/test_dataset/reference_1K.fa.gz index 05cd8ef..9062ac3 100644 Binary files a/src/SPAdes-3.10.1-Linux/share/spades/test_dataset/reference_1K.fa.gz and b/src/SPAdes-3.14.0-Linux/share/spades/test_dataset/reference_1K.fa.gz differ diff --git a/src/SPAdes-3.10.1-Linux/share/spades/test_dataset_plasmid/pl1.fq.gz b/src/SPAdes-3.14.0-Linux/share/spades/test_dataset_plasmid/pl1.fq.gz similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/test_dataset_plasmid/pl1.fq.gz rename to src/SPAdes-3.14.0-Linux/share/spades/test_dataset_plasmid/pl1.fq.gz diff --git a/src/SPAdes-3.10.1-Linux/share/spades/test_dataset_plasmid/pl2.fq.gz b/src/SPAdes-3.14.0-Linux/share/spades/test_dataset_plasmid/pl2.fq.gz similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/test_dataset_plasmid/pl2.fq.gz rename to src/SPAdes-3.14.0-Linux/share/spades/test_dataset_plasmid/pl2.fq.gz diff --git a/src/SPAdes-3.10.1-Linux/share/spades/test_dataset_truspades/A_R1.fastq.gz b/src/SPAdes-3.14.0-Linux/share/spades/test_dataset_truspades/A_R1.fastq.gz similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/test_dataset_truspades/A_R1.fastq.gz rename to src/SPAdes-3.14.0-Linux/share/spades/test_dataset_truspades/A_R1.fastq.gz diff --git a/src/SPAdes-3.10.1-Linux/share/spades/test_dataset_truspades/A_R2.fastq.gz b/src/SPAdes-3.14.0-Linux/share/spades/test_dataset_truspades/A_R2.fastq.gz similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/test_dataset_truspades/A_R2.fastq.gz rename to 
src/SPAdes-3.14.0-Linux/share/spades/test_dataset_truspades/A_R2.fastq.gz diff --git a/src/SPAdes-3.10.1-Linux/share/spades/test_dataset_truspades/B_R1.fastq.gz b/src/SPAdes-3.14.0-Linux/share/spades/test_dataset_truspades/B_R1.fastq.gz similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/test_dataset_truspades/B_R1.fastq.gz rename to src/SPAdes-3.14.0-Linux/share/spades/test_dataset_truspades/B_R1.fastq.gz diff --git a/src/SPAdes-3.10.1-Linux/share/spades/test_dataset_truspades/B_R2.fastq.gz b/src/SPAdes-3.14.0-Linux/share/spades/test_dataset_truspades/B_R2.fastq.gz similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/test_dataset_truspades/B_R2.fastq.gz rename to src/SPAdes-3.14.0-Linux/share/spades/test_dataset_truspades/B_R2.fastq.gz diff --git a/src/SPAdes-3.10.1-Linux/share/spades/truspades_manual.html b/src/SPAdes-3.14.0-Linux/share/spades/truspades_manual.html similarity index 100% rename from src/SPAdes-3.10.1-Linux/share/spades/truspades_manual.html rename to src/SPAdes-3.14.0-Linux/share/spades/truspades_manual.html diff --git a/src/SPAdes-3.14.0-Linux/share/spaligner/spaligner_config.yaml b/src/SPAdes-3.14.0-Linux/share/spaligner/spaligner_config.yaml new file mode 100644 index 0000000..bda82a5 --- /dev/null +++ b/src/SPAdes-3.14.0-Linux/share/spaligner/spaligner_config.yaml @@ -0,0 +1,37 @@ +output_format: tsv + +hits_generation: + internal_length_cutoff: 200 + path_limit_stretching: 1.3 + path_limit_pressing: 0.6 + max_path_in_chaining: 15000 + max_vertex_in_chaining: 5000 + +################## nucleotide sequences alignment parameters + +run_dijkstra: true +restore_ends: true + +gap_closing: + queue_limit: 1000000 + iteration_limit: 1000000 + updates_limit: 1000000 + find_shortest_path: false + restore_mapping: false + penalty_ratio: 200 + max_ed_proportion: 3 # max_ed = min(ed_upper_bound, max(sequence_length/max_ed_proportion, ed_lower_bound)) + ed_lower_bound: 500 + ed_upper_bound: 2000 + max_gs_states: 120000000 + +ends_recovering: + queue_limit: 1000000 + iteration_limit: 1000000 + updates_limit: 1000000 + find_shortest_path: true + restore_mapping: false + penalty_ratio: 0.1 + max_ed_proportion: 5 # max_ed = min(ed_upper_bound, max(sequence_length/max_ed_proportion, ed_lower_bound)) + ed_lower_bound: 500 + ed_upper_bound: 2000 + max_restorable_length: 5000 \ No newline at end of file diff --git a/src/bowtie2-2.2.9/bowtie2-buildc b/src/bowtie2-2.2.9/bowtie2-buildc new file mode 100644 index 0000000..0f8ab86 Binary files /dev/null and b/src/bowtie2-2.2.9/bowtie2-buildc differ
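The rewritten `check_python_version` in support.py above no longer matches `sys.version[0:3]` against a fixed list; each entry of `SUPPORTED_PYTHON_VERSIONS` is now an exact version ("2.7"), a half-open range ("3.2+"), or a closed range ("3.2-3.8"). A condensed sketch of that logic, with an assumed example value for `SUPPORTED_PYTHON_VERSIONS` (the real list lives in `options_storage`):

```python
from distutils.version import LooseVersion

SUPPORTED_PYTHON_VERSIONS = ["2.7", "3.2+"]  # assumed example value

def next_version(version):
    # bump the last numeric component: "2.7" -> "2.8", "3" -> "4"
    components = version.split('.')
    for i in reversed(range(len(components))):
        if components[i].isdigit():
            components[i] = str(int(components[i]) + 1)
            break
    return '.'.join(components)

def is_supported(current):
    for spec in SUPPORTED_PYTHON_VERSIONS:
        major = spec[0]
        if '-' in spec:               # closed range, e.g. "3.2-3.8" = [3.2, 3.9)
            min_inc, max_inc = spec.split('-')
        elif spec.endswith('+'):      # half-open range, e.g. "3.2+" = [3.2, 4)
            min_inc, max_inc = spec[:-1], major
        else:                         # exact minor version, e.g. "2.7" = [2.7, 2.8)
            min_inc = max_inc = spec
        if LooseVersion(min_inc) <= LooseVersion(current) < LooseVersion(next_version(max_inc)):
            return True
    return False

print(is_supported("3.6.9"))  # True  (3.2 <= 3.6.9 < 4)
print(is_supported("2.6"))    # False
```

Bumping the upper bound and comparing with a strict `<` is what makes "3.2+" accept any 3.x at or above 3.2 while still rejecting 4.0.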
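The `add_to_dataset` rewrite in support.py above drops the old arithmetic record index (`MAX_LIBS_NUMBER * type_offset + lib_number - 1` into a preallocated list) in favour of a dict keyed by `"<lib_type>_<lib_number>"`. A minimal sketch of the new keying, with `get_lib_type_and_number` stubbed out (the real one parses options such as `--pe1-1`) and the function name chosen here for illustration:

```python
def get_lib_type_and_number(option):
    return "pe", 1  # stand-in: the real parser maps e.g. "--pe1-1" -> ("pe", 1)

def add_record(option, data, dataset_data):
    lib_type, lib_number = get_lib_type_and_number(option)
    record_id = "%s_%d" % (lib_type, lib_number)   # string key instead of a list index
    if record_id not in dataset_data:              # defaults for a new record
        dataset_data[record_id] = {"type": "paired-end"}
    dataset_data[record_id].setdefault("left reads", []).append(data)

dataset = {}
add_record("--pe1-1", "sample_R1.fastq.gz", dataset)
print(dataset)  # {'pe_1': {'type': 'paired-end', 'left reads': ['sample_R1.fastq.gz']}}
```

This is why `correct_dataset` above now accepts either form, iterating `dataset_data.values()` when given a dict and the bare list otherwise.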
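The `NBreaker.Break` loop in break_by_coverage.py above (repaired from an extraction-garbled span) scans a contig for runs of at least `min_N` consecutive 'N' characters and splits at them. A simplified standalone paraphrase, using a plain string in place of the contig object and slicing in place of `contig.subseq()` (edge handling of leading and trailing N runs differs slightly from the original):

```python
import re

def break_at_n_runs(seq, min_N):
    # split seq wherever a run of at least min_N 'N's occurs
    pieces, last = [], 0
    for match in re.finditer("N{%d,}" % min_N, seq):
        pieces.append(seq[last:match.start()])
        last = match.end()
    pieces.append(seq[last:])
    return [p for p in pieces if p]  # drop empty leading/trailing pieces

print(break_at_n_runs("ACGTNNNNNNNNNNTTGCA", 10))  # ['ACGT', 'TTGCA']
```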
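The new `spaligner_config.yaml` above documents its edit-distance bound inline: `max_ed = min(ed_upper_bound, max(sequence_length/max_ed_proportion, ed_lower_bound))`. A quick worked illustration of that clamping, using the `gap_closing` values from the config (the function name is ours, not SPAligner's):

```python
def max_ed(sequence_length, max_ed_proportion=3, ed_lower_bound=500, ed_upper_bound=2000):
    # max_ed = min(ed_upper_bound, max(sequence_length / max_ed_proportion, ed_lower_bound))
    return int(min(ed_upper_bound, max(sequence_length / max_ed_proportion, ed_lower_bound)))

print(max_ed(900))   # 500:  short queries are clamped up to ed_lower_bound
print(max_ed(3000))  # 1000: mid-range scales as sequence_length / max_ed_proportion
print(max_ed(9000))  # 2000: long queries are capped at ed_upper_bound
```

So the allowed edit distance grows linearly with query length but only between the two bounds, which keeps gap closing from exploding on very long sequences.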