Made snippet input and output files more harmonised. #6

Merged
H.C. Donker merged 1 commit from feature/harmonised-steps into master 2021-02-23 09:17:17 +01:00
5 changed files with 57 additions and 36 deletions

View File

@@ -1,5 +1,12 @@
-# The command to do a FastQC on a fastq file is
-file="file_to_analyse.fq.gz"
-fastqc_out="./path/to/fastqc/output/dir"
-fastqc -o "$fastqc_out" "$file"
+#!/bin/bash
+R1="sample1_R1.fastq.gz"
+R2="sample1_R2.fastq.gz"
+PROJECT_DIRECTORY="/groups/umcg-griac/tmp01/rawdata/$(whoami)/rnaseq"
+FASTQC_OUT="${PROJECT_DIRECTORY}/step1/"
+mkdir -p "${FASTQC_OUT}"
+
+# Run FastQC on paired-end data.
+fastqc \
+    -o "${FASTQC_OUT}" \
+    "${R1}" "${R2}"

View File

@@ -1,9 +1,12 @@
 #!/bin/bash
 #
-# reference: http://www.usadellab.org/cms/?page=trimmomatic
+# Reference: http://www.usadellab.org/cms/?page=trimmomatic
 module load Trimmomatic
+PROJECT_DIRECTORY="/groups/umcg-griac/tmp01/rawdata/$(whoami)/rnaseq"
+FASTQ_OUT="${PROJECT_DIRECTORY}/step2/"
+mkdir -p "${FASTQ_OUT}"
 
 # Adapters can be found at
@@ -11,15 +14,25 @@ module load Trimmomatic
 # But should be verified with FastQC, or in another way.
 
-# (Example) Paired end
+# Trimmomatic example: paired-end data.
+#
+# Flags:
+# - ILLUMINACLIP: Cut adapter and other Illumina-specific sequences from the
+#   read.
+# - SLIDINGWINDOW: Perform a sliding window trimming, cutting once the average
+#   quality within the window falls below a threshold.
+# - LEADING: Cut bases off the start of a read, if below a threshold quality.
+# - TRAILING: Cut bases off the end of a read, if below a threshold quality.
+# - HEADCROP: Cut the specified number of bases from the start of the read.
+# - MINLEN: Drop the read if it is below a specified length.
 
 java -jar $EBROOTTRIMMOMATIC/trimmomatic.jar PE \
     -phred33 \
-    input_forward.fq.gz \
-    input_reverse.fq.gz \
-    output_forward_paired.fq.gz \
-    output_forward_unpaired.fq.gz \
-    output_reverse_paired.fq.gz \
-    output_reverse_unpaired.fq.gz \
+    sample1_R1.fastq.gz \
+    sample1_R2.fastq.gz \
+    "${FASTQ_OUT}/sample1_R1_paired.fastq.gz" \
+    "${FASTQ_OUT}/sample1_R1_unpaired.fastq.gz" \
+    "${FASTQ_OUT}/sample1_R2_paired.fastq.gz" \
+    "${FASTQ_OUT}/sample1_R2_unpaired.fastq.gz" \
     ILLUMINACLIP:TruSeq3-PE.fa:2:30:10 \
     LEADING:3 \
     TRAILING:3 \
@@ -28,11 +41,11 @@ java -jar $EBROOTTRIMMOMATIC/trimmomatic.jar PE \
     MINLEN:50
 
-# (Example) Single end
+# Trimmomatic example: single-end data.
 java -jar $EBROOTTRIMMOMATIC/trimmomatic.jar SE \
     -phred33 \
-    input.fq.gz \
-    output.fq.gz \
+    sample1.fastq.gz \
+    output.fastq.gz \
     ILLUMINACLIP:TruSeq3-SE:2:30:10 \
     LEADING:3 \
     TRAILING:3 \
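
In the ILLUMINACLIP step, 2:30:10 stands for the allowed seed mismatches, the palindrome clip threshold and the simple clip threshold; when the adapter file is given as a bare name, most Trimmomatic versions resolve it relative to the working directory. A minimal sketch of a more robust call (trimming parameters are illustrative), assuming — not guaranteed on every cluster — that the loaded Trimmomatic module ships the adapter FASTAs under ${EBROOTTRIMMOMATIC}/adapters:

#!/bin/bash
module load Trimmomatic

# Assumed adapter location; check with `ls "${EBROOTTRIMMOMATIC}/adapters"` first.
ADAPTERS="${EBROOTTRIMMOMATIC}/adapters/TruSeq3-PE.fa"

java -jar "${EBROOTTRIMMOMATIC}/trimmomatic.jar" PE \
    -phred33 \
    sample1_R1.fastq.gz sample1_R2.fastq.gz \
    sample1_R1_paired.fastq.gz sample1_R1_unpaired.fastq.gz \
    sample1_R2_paired.fastq.gz sample1_R2_unpaired.fastq.gz \
    ILLUMINACLIP:"${ADAPTERS}":2:30:10 \
    SLIDINGWINDOW:4:20 \
    MINLEN:50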

View File

@@ -2,25 +2,26 @@
 #
 # Align reads against reference genome.
-STORAGE="/groups/umcg-griac/tmp04/rawdata/$(whoami)/step3"
+PROJECT_DIRECTORY="/groups/umcg-griac/tmp01/rawdata/$(whoami)/rnaseq"
 
 # Store the genome index in this location:
-GENOME_INDEX="${STORAGE}/genome_index"
+GENOME_INDEX="${PROJECT_DIRECTORY}/step3/genome_index"
 mkdir -p "${GENOME_INDEX}"
 
 # Store the generated `Aligned.sortedByCoord.out.bam` in this dir.
-ALIGNMENT_OUTPUT="${STORAGE}/alignment"
+ALIGNMENT_OUTPUT="${PROJECT_DIRECTORY}/step3/alignment/"
 mkdir -p "${ALIGNMENT_OUTPUT}"
 
 # 1) Generate genome index.
 #
 # N.B.:
-# - We're assuming a read size of 100 bp (--sjdbOverhang 100). Refer back to the
+# - We're assuming a read size of 100 bp (--sjdbOverhang 100). An alternative
+#   cut-off is 150, for low-input methods. In general, refer back to the
 #   previous quality control steps if you are unsure about the size. In case of
 #   reads of varying length, the ideal value is max(ReadLength)-1.
 # - We're using gzip compressed reference data (--readFilesCommand zcat), i.e.,
 #   .gtf.gz and .fa.gz. If not, you can remove the `zcat` flag.
 
-# Storage location of the reference data (in this case on calculon).
-REFERENCE_DATA="/groups/umcg-griac/prm02/rawdata/reference/genome"
+# Storage location of the reference data (in this case on Gearshift).
+REFERENCE_DATA="/groups/umcg-griac/prm03/rawdata/reference/genome"
 GTF_FILE="${REFERENCE_DATA}/Homo_sapiens.GRCh38.100.gtf.gz"
 FASTA_FILE="${REFERENCE_DATA}/Homo_sapiens.GRCh38.dna.primary_assembly.fa.gz"

@@ -40,9 +41,9 @@ STAR \
 # - We are assuming paired-end, gzip compressed (--readFilesCommand zcat) FastQ
 #   files.
 
-# THe compressed paired-end FastQ's that we are aligning.
-R1="sample1_R1.fastq.gz"
-R2="sample1_R2.fastq.gz"
+# The compressed, paired-end FastQ's after trimming (step 2).
+R1="${PROJECT_DIRECTORY}/step2/sample1_R1_paired.fastq.gz"
+R2="${PROJECT_DIRECTORY}/step2/sample1_R2_paired.fastq.gz"
 STAR \
     --runThreadN 8 \
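
The --sjdbOverhang reasoning above (ideal value = max(ReadLength) - 1) can be checked against the trimmed data before building the index. A small sketch, using the hypothetical sample1 path from the snippet, that scans the first 10,000 reads of a gzip-compressed FastQ:

#!/bin/bash
PROJECT_DIRECTORY="/groups/umcg-griac/tmp01/rawdata/$(whoami)/rnaseq"
R1="${PROJECT_DIRECTORY}/step2/sample1_R1_paired.fastq.gz"

# In FastQ, the sequence is every 4th line starting at line 2;
# 40,000 lines therefore cover the first 10,000 reads.
MAX_LEN=$(zcat "${R1}" | head -n 40000 | awk 'NR % 4 == 2 && length($0) > max { max = length($0) } END { print max }')
echo "Longest read: ${MAX_LEN} bp -> suggested --sjdbOverhang $((MAX_LEN - 1))"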

View File

@@ -1,5 +1,5 @@
 #!/bin/bash
 module load multiqc
 
-# assuming you have all your fastQC result files in the ./fastQCresults folder
-multiqc ./fastQCresults
+PROJECT_DIRECTORY="/groups/umcg-griac/tmp01/rawdata/$(whoami)/rnaseq"
+multiqc "${PROJECT_DIRECTORY}"
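
MultiQC walks the given directory recursively and aggregates any reports it recognises (FastQC, Trimmomatic, STAR, ...), so pointing it at the project root collects all previous steps at once. A minimal sketch that additionally keeps the combined report in its own folder; the step4 directory name is an assumption, not part of the merged scripts:

#!/bin/bash
module load multiqc

PROJECT_DIRECTORY="/groups/umcg-griac/tmp01/rawdata/$(whoami)/rnaseq"
MULTIQC_OUT="${PROJECT_DIRECTORY}/step4/"  # hypothetical output directory
mkdir -p "${MULTIQC_OUT}"

# Scan the whole project and write the aggregated report into step4/.
multiqc --outdir "${MULTIQC_OUT}" "${PROJECT_DIRECTORY}"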

View File

@@ -18,7 +18,7 @@ do.scale = FALSE
 # The analysis
-# We ise prcomp to calculate the PCAs. Afterwards you should plot the results.
+# We use prcomp to calculate the PCAs. Afterwards you should plot the results.
 norm.expr.data <- expression.data %>%
   tibble::column_to_rownames("Gene")
 norm.expr.data <- norm.expr.data[rowSums(norm.expr.data) >= 10,] %>%

@@ -37,8 +37,8 @@ norm.expr.data.pcs <- norm.expr.data %>%
   )
 
 # Write summary of PCAs to files
-pcs.summery <- summary(norm.expr.data.pcs)
-pcs.summery$importance %>%
+pcs.summary <- summary(norm.expr.data.pcs)
+pcs.summary$importance %>%
   t() %>%
   as.data.frame() %>%
   tibble::rownames_to_column("PC.name") %>%

@@ -46,7 +46,7 @@ pcs.summery$importance %>%
     file.path(results.dir.pca, "importance.csv")
   )
 
-pcs.summery$x %>%
+pcs.summary$x %>%
   t() %>%
   as.data.frame() %>%
   tibble::rownames_to_column("ensembl.id") %>%

@@ -54,7 +54,7 @@ pcs.summery$x %>%
     file.path(results.dir.pca, "values.csv")
   )
 
-pcs.summery$rotation %>%
+pcs.summary$rotation %>%
   t() %>%
   as.data.frame() %>%
   tibble::rownames_to_column("sample.id") %>%

@@ -63,15 +63,15 @@ pcs.summery$rotation %>%
   )
 
 data.frame(
-  rownames = names(pcs.summery$center),
-  center = pcs.summery$center,
-  scale = pcs.summery$scale
+  rownames = names(pcs.summary$center),
+  center = pcs.summary$center,
+  scale = pcs.summary$scale
 ) %>%
   readr::write_csv(
     file.path(results.dir.pca, "rest.csv")
   )
 
-# Not saved: pcs.summery$sdev,
+# Not saved: pcs.summary$sdev,
 
 # Next thing to do: