forked from chiulab/surpi
-
Notifications
You must be signed in to change notification settings - Fork 0
/
preprocess_ncores.sh
executable file
·157 lines (130 loc) · 5.11 KB
/
preprocess_ncores.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
#!/bin/bash
#
# preprocess_ncores.sh
#
# This script runs preprocessing across multiple cores (FASTA/FASTQ header modification, quality filtering, adapter trimming, and low-complexity filtering)
# Chiu Laboratory
# University of California, San Francisco
# January, 2014
#
#
# Copyright (C) 2014 Charles Chiu - All Rights Reserved
# SURPI has been released under a modified BSD license.
# Please see license file for details.

# Basename of this script, used as a tag in every log line below.
scriptname=${0##*/}

# Require exactly 12 positional arguments; otherwise print usage and abort.
# BUGFIX: bare `exit` returned status 0 on a usage error; exit non-zero so
# callers (and `set -e` pipelines) can detect the failure.
if [ $# -ne 12 ]; then
	echo "Usage: $scriptname <R1 FASTQ file> <S/I quality> <Y/N uniq> <length cutoff; 0 for no length cutoff> <# of cores> <free cache memory cutoff in GB> <Y/N keep short reads> <adapter_set> <start_nt> <crop_length> <temporary_files_directory> <quality_cutoff>" >&2
	exit 1
fi
###
# Positional arguments (see usage message above).
inputfile=$1                    # R1 FASTQ file to preprocess
quality=$2                      # S/I quality encoding flag
run_uniq=$3                     # Y/N: deduplicate reads after preprocessing
length_cutoff=$4                # minimum read length (0 = no cutoff)
cores=$5                        # number of parallel chunks/cores
cache_reset=$6                  # free cache memory cutoff in GB
keep_short_reads=$7             # Y/N: keep reads shorter than the cutoff
adapter_set=$8                  # adapter set passed through to preprocess.sh
start_nt=$9                     # first nucleotide to keep when cropping
crop_length=${10}               # crop length
temporary_files_directory=${11} # scratch directory for preprocess.sh
quality_cutoff=${12}            # quality trimming cutoff
###

# Abort early if the input FASTQ file does not exist.
# BUGFIX: quote the expansion (paths with spaces) and exit non-zero on error.
if [ ! -f "$inputfile" ]
then
	echo "$inputfile not found!" >&2
	exit 1
fi
# Free memory in GB: column 4 of the second line of `free -g` output.
freemem=$(free -g | awk 'NR==2 {print $4}')
# BUGFIX: the original compared against $free_cache_cutoff, a variable that is
# never assigned anywhere in this script (the `[ -lt ]` test errors out on the
# empty operand). The cutoff is argument 6, stored in $cache_reset above.
echo -e "$(date)\t$scriptname\tThere is $freemem GB available free memory...[cutoff=$cache_reset GB]"
if [ "$freemem" -lt "$cache_reset" ]
then
	echo -e "$(date)\t$scriptname\tClearing cache..."
	# dropcache is an external SURPI helper that flushes the OS page cache.
	dropcache
fi
START=$(date +%s)
echo -e "$(date)\t$scriptname\tSplitting $inputfile..."
# Count lines and FASTQ records (4 lines per record).
# Idiom: `wc -l < file` prints only the count, so no awk pass is needed;
# `let` + backticks replaced with POSIX arithmetic expansion.
numlines=$(wc -l < "$inputfile")
FASTQentries=$(( numlines / 4 ))
echo -e "$(date)\t$scriptname\tThere are $FASTQentries FASTQ entries in $inputfile"
# Lines per chunk, rounded down to a multiple of 4 so no FASTQ record is ever
# split across two chunks; integer-division remainder lands in a final extra chunk.
LinesPerCore=$(( numlines / cores ))
FASTQperCore=$(( LinesPerCore / 4 ))
SplitPerCore=$(( FASTQperCore * 4 ))
echo -e "$(date)\t$scriptname\twill use $cores cores with $FASTQperCore entries per core"
# Default split naming produces chunks xaa, xab, ... consumed by the loops below.
split -l "$SplitPerCore" "$inputfile"
END_SPLIT=$(date +%s)
diff_SPLIT=$(( END_SPLIT - START ))
echo -e "$(date)\t$scriptname\tDone splitting: "
echo -e "$(date)\t$scriptname\tSPLITTING took $diff_SPLIT seconds"
echo -e "$(date)\t$scriptname\tRunning preprocess script for each chunk..."
# Launch one preprocess.sh per split chunk (xaa, xab, ...) in parallel.
for f in x??
do
	mv "$f" "$f.fastq"
	# BUGFIX: the logged command previously omitted $quality_cutoff, so the log
	# did not match the command actually executed; both lines now agree.
	echo -e "$(date)\t$scriptname\tpreprocess.sh $f.fastq $quality N $length_cutoff $keep_short_reads $adapter_set $start_nt $crop_length $temporary_files_directory $quality_cutoff >& $f.preprocess.log &"
	preprocess.sh "$f.fastq" "$quality" N "$length_cutoff" "$keep_short_reads" "$adapter_set" "$start_nt" "$crop_length" "$temporary_files_directory" "$quality_cutoff" >& "$f.preprocess.log" &
done
# Barrier: wait for every background preprocess job before concatenating output.
for job in $(jobs -p)
do
	wait "$job"
done
echo -e "$(date)\t$scriptname\tDone preprocessing for each chunk..."
# Base name of the original input (path and .fastq suffix stripped); all
# per-chunk outputs are merged into files named after this base.
nopathf2=${1##*/}
basef2=${nopathf2%.fastq}
# Remove any stale aggregate files so repeated runs do not append to old output.
rm -f "$basef2.cutadapt.fastq"
rm -f "$basef2.preprocessed.fastq"
rm -f "$basef2"*.dusted.bad.fastq
# For each chunk: append its logs/outputs to the aggregate files, then delete
# the chunk-level files (including intermediates preprocess.sh left behind).
for f in x??.fastq
do
	nopathf=${f##*/}
	basef=${nopathf%.fastq}
	cat "$basef.preprocess.log" >> "$basef2.preprocess.log"
	rm -f "$basef.preprocess.log"
	cat "$basef.modheader.cutadapt.summary.log" >> "$basef2.cutadapt.summary.log"
	rm -f "$basef.modheader.cutadapt.summary.log"
	cat "$basef.modheader.adapterinfo.log" >> "$basef2.adapterinfo.log"
	rm -f "$basef.modheader.adapterinfo.log"
	cat "$basef.cutadapt.fastq" >> "$basef2.cutadapt.fastq"
	rm -f "$basef.cutadapt.fastq"
	cat "$basef.cutadapt.cropped.fastq.log" >> "$basef2.cutadapt.cropped.fastq.log"
	rm -f "$basef.cutadapt.cropped.fastq.log"
	cat "$basef.preprocessed.fastq" >> "$basef2.preprocessed.fastq"
	rm -f "$basef.preprocessed.fastq"
	cat "$basef.cutadapt.cropped.dusted.bad.fastq" >> "$basef2.cutadapt.cropped.dusted.bad.fastq"
	rm -f "$basef.cutadapt.cropped.dusted.bad.fastq"
	rm -f "$f"
	rm -f "$basef.modheader.fastq"
	rm -f "$basef.cutadapt.summary.log"
	rm -f "$basef.adapterinfo.log"
	rm -f "$basef.cutadapt.cropped.fastq"
done
echo -e "$(date)\t$scriptname\tDone concatenating output..."
# Optionally deduplicate reads (argument 3). `=` used instead of the bashism
# `==` inside [ ], matching the equivalent test further down the script.
if [ "$run_uniq" = "Y" ] # selecting unique reads
then
	echo -e "$(date)\t$scriptname\tSelecting unique reads"
	START_UNIQ=$(date +%s)
	# FASTQ -> FASTA: drop every 4th line (quality), then every 3rd ('+'
	# separator), then rewrite '@' headers as '>'.
	sed "n;n;n;d" "$basef2.preprocessed.fastq" | sed "n;n;d" | sed "s/^@/>/g" > "$basef2.preprocessed.fasta"
	# GenomeTools sequniq keeps one representative per duplicate sequence.
	gt sequniq -force -o "$basef2.uniq.fasta" "$basef2.preprocessed.fasta"
	# Pull the corresponding FASTQ records (with qualities) for the unique headers.
	extractHeaderFromFastq.csh "$basef2.uniq.fasta" FASTA "$basef2.preprocessed.fastq" "$basef2.uniq.fastq"
	cp -f "$basef2.uniq.fastq" "$basef2.preprocessed.fastq"
	END_UNIQ=$(date +%s)
	diff_UNIQ=$(( END_UNIQ - START_UNIQ ))
	echo -e "$(date)\t$scriptname\tUNIQ took $diff_UNIQ seconds"
else
	echo -e "$(date)\t$scriptname\tIncluding duplicates (did not run UNIQ)"
fi
END=$(date +%s)
diff_TOTAL=$(( END - START ))
# Median per-core CUTADAPT time: field 12 of matching log lines, sorted
# numerically, middle element taken by awk.
# BUGFIX: the original `let "avgtime1=`grep ...`"` fails with a syntax error
# when grep finds no matching lines (empty right-hand side); default to 0.
avgtime1=$(grep CUTADAPT "$basef2.preprocess.log" | awk '{print $12}' | sort -n | awk '{ a[i++]=$1} END {print a[int(i/2)];}')
avgtime1=${avgtime1:-0}
echo -e "$(date)\t$scriptname\tmedian CUTADAPT time per core: $avgtime1 seconds"
if [ "$run_uniq" = "Y" ]; then
	avgtime2=$diff_UNIQ
	echo -e "$(date)\t$scriptname\tUNIQ time: $diff_UNIQ seconds"
else
	avgtime2=0
fi
# Median per-core DUST time, same guard as above.
avgtime3=$(grep DUST "$basef2.preprocess.log" | awk '{print $12}' | sort -n | awk '{ a[i++]=$1} END {print a[int(i/2)];}')
avgtime3=${avgtime3:-0}
# Sum of the serial split phase plus the median parallel-phase times; the wall
# clock (diff_TOTAL) is reported separately as it includes all overhead.
totaltime=$(( diff_SPLIT + avgtime1 + avgtime2 + avgtime3 ))
echo -e "$(date)\t$scriptname\tTOTAL TIME: $totaltime seconds"
echo -e "$(date)\t$scriptname\tTOTAL CLOCK TIME (INCLUDING OVERHEAD): $diff_TOTAL seconds"