VHH免疫测序数据分析1

9 篇文章 3 订阅

目的简介

根据VHH噬菌体展示筛选结果建库测序(PE300),初步分析结果如下:
1、拼接reads,得到contigs,然后统计unique contig的数量
2、注释拼接得到的contigs得到framework区和CDR区
3、分类形成clonotype:V&J reference gene相同,CDR3长度相同,且相似度高于80%
4、根据上述注释信息统计clonotype count对应的unique contig count 、unique protein count 等等信息

工具

  • igblast
  • cd-hit
  • pandas

实操

1.质控

# Step 1 - QC: run FastQC on every gzipped FASTQ (16 threads, quiet, in the
# background), then aggregate all reports with MultiQC.
mkdir Fastqc
ls | grep "fq.gz$" |while read id ;do (fastqc -q -t 16 -o ./Fastqc $id );done &
mkdir multiqc
multiqc ./ -o ./multiqc
#3' base quality is somewhat poor; plan to trim bases below Q20 and everything after them

(image placeholder: MultiQC per-base quality screenshot)

2.修剪

# Step 2 - adapter/quality trimming with Trimmomatic in paired-end mode.
# Pair R1/R2 file names line by line, then trim each pair:
# adapter clip, clip Q<3 leading/trailing bases, 4-base sliding window at Q20,
# drop reads shorter than 150 bp.
ls |grep "R1_001" > R1.txt
ls |grep "R2_001" > R2.txt
paste R1.txt R2.txt > list.txt
mkdir trim && cd trim
cat ../list.txt |while read id ;do (arr=($id);java -jar /public/home/djs/software/Trimmomatic-0.39/trimmomatic-0.39.jar PE -phred33 ../${arr[0]} ../${arr[1]} -baseout ${arr[0]}.trim.fastq.gz ILLUMINACLIP:/public/home/djs/software/Trimmomatic-0.39/adapters/TruSeq3-PE.fa:2:30:10:8:true LEADING:3 TRAILING:3 SLIDINGWINDOW:4:20 MINLEN:150 );done
#Trimmed results land in the trim folder: 1P = trimmed R1, 2P = trimmed R2

3.合并双端reads

# Step 3 - merge the paired reads with pandaseq (min overlap 12, -F keeps
# FASTQ output, -N drops reads with Ns).
conda search pandaseq
conda install pandaseq
ls |grep "1P.fastq.gz" > 1p.txt
ls |grep "2P.fastq.gz" > 2p.txt
paste 1p.txt 2p.txt > list.txt
cat list.txt |while read id ;do arr=($id);pandaseq -f ${arr[0]} -r ${arr[1]} -F -N -o 12 -w ./${arr[0]}.merge;done #uncompressed output
cat list.txt |while read id ;do arr=($id);pandaseq -f ${arr[0]} -r ${arr[1]} -F -N -o 12 -W ./${arr[0]}.merge;done #output file is bzip2-compressed
#output suffix is merge.out.fq

4.DNA level 计数

#fq to txt: keep only the sequence line (line 2 of every 4) of each FASTQ record
#cut PCR primer: trim everything before the 5' anchor and everything from the 3' anchor on
ls |grep "merge.out" | while read id ;do awk '{if(NR%4==2) print $0}' $id |sed 's/.*GGTTTCGCTACCGTGGCCCAGGCGGCC/GGTTTCGCTACCGTGGCCCAGGCGGCC/g' |sed 's/GAACCCAAGACA.*//g' > ${id}.trim_vector;done
ls |grep "trim_vector" | while read id ;do awk '{a=length($0);print $0"\t"a}' $id | sort -n -k2 |cut -f1 > ${id}.sort ;done 
#uniq only collapses adjacent identical lines, so sequences are first sorted by length
ls |grep "sort" | while read id ;do cat $id |uniq -c |sort -n -k1 |awk '{print ">"NR"+"$1;print $2}' > ${id}.sort.sort ;done 
#unique DNA sequences are now counted and written as a fa file (header = "<rank>+<count>")

5.igblast

# Step 5 - igblast: annotate each unique-contig fasta against the alpaca
# V/D/J germline databases; -outfmt 19 writes AIRR-format TSV (one row per query).
mkdir output_igblast
ls /public/home/djs/huiyu/BCR_NGS/trim2 |grep "sort.sort" |while read id ;do
./bin/igblastn -germline_db_V ./database/alpaca_v_igblast.fasta  -germline_db_D ./database/alpaca_d_igblast.fasta -germline_db_J ./database/alpaca_j_igblast.fasta  \
-query ~/BCR_NGS/trim2/$id -organism alpaca  \
-auxiliary_data ./optional_file/alpaca_gl.aux \
-show_translation -extend_align5end -extend_align3end  \
-num_alignments_V 1 -num_alignments_D 1 -num_alignments_J 1 \
-outfmt 19  > ~/trim2/output_igblast/$id.txt ;done

6.根据igblast结果统计unique protein 然后根据CDR3聚类并统计unique cluster

#将CDR3信息转成fa文件
/usr/bin/python3
import numpy as np
import pandas as pd
df1 = pd.read_csv("NB244-R1-H_S1_L001_R1_001.fastq.gz.trim_1P.fastq.gz.merge.out.trim_vector.sort.sort.sort.txt",sep='\t')
df2 = df1[(df1["stop_codon"] == 'F') & (df1["vj_in_frame"] == 'T') & (df1["v_frameshift"] == 'F') & (df1["productive"] == 'T') & (df1["complete_cdj"] == 'T')]
df3 = df2.sort_values(["v_call","j_call"])
df3.to_csv("test1.txt",sep="\t")
exit()
#Clustering: write CDR3 amino-acid sequences (column 49) as fasta, one record per contig id
awk 'BEGIN{FS="\t"} NR > 1 {print ">"$2;print $49}' test1.txt  > cluster.fa
# cd-hit at 80% identity (-c 0.8), word size 3, length-difference cutoff 0.99
~/software/cd-hit-v4.8.1-2019-0228/cd-hit -i cluster.fa -o cluster.test -g 1 -c 0.8 -n 3 -d 0 -l 3  -s 0.99 -S 0 -U 0 -sc 1
#Reshape the .clstr output into "cluster_id<TAB>sequence_id" rows
cat cluster.test.clstr |sed 's/.*>/>/g'|sed 's/\..*//g' |tr -d '\n' | sed 's/>C/\nC/g' | awk 'BEGIN{FS=">"} {for(i=2;i<=NF;i++){print $1"\t"$i}}' > test.cluster.txt
sed -i '1i cluster_id\tsequence_id' test.cluster.txt
#Condensed igblast result: keep only the columns needed downstream
cut -f 2,3,11,13,16,49 test1.txt > test2.txt
#Result files are ready; next, count unique proteins / unique clusters
/usr/bin/python3

import numpy as np
import pandas as pd

df1 = pd.read_csv("test2.txt",sep="\t") #condensed igblast result
df2 = pd.read_csv("test.cluster.txt",sep="\t") #cluster membership info
df3 = pd.merge(df1,df2,on="sequence_id")
#Contigs were named "<rank>+<count>" after sorting,
#so the DNA count is recovered here from the part of the sequence id after '+'
df3["DNA_count"] = df3["sequence_id"].map(lambda x:x.split('+')[1])
df3.DNA_count = df3.DNA_count.astype("int64")
#df3.DNA_count.dtype
#In BCR-seq a clonotype is usually defined as: same V & J germline reference,
#same CDR3 length and >80% CDR3 similarity
df4 = df3.groupby(["v_call","j_call","cluster_id"]).DNA_count.sum() #total DNA reads per clonotype
df4.name = "Cluster_count"
df5 = pd.merge(df3,df4,on=["v_call","j_call","cluster_id"])
#Note that rows sharing a cluster id do not necessarily share V & J germline references,
#because cluster ids come from CDR3-only clustering; further tidying is needed below.
#Cluster_count itself, however, is already counted per (V, J, cluster) triple,
#i.e. per the clonotype definition above.
df5["clonotype"] = df5["cluster_id"].map(lambda x:x.split(" ")[1])+"@"+df5["v_call"].map(lambda x:x.split("*")[0])+"@"+df5["j_call"].map(lambda x:x.split("*")[0])
#Unique protein count: total DNA reads per amino-acid sequence
df6 = df3.groupby("sequence_alignment_aa").agg("sum")["DNA_count"]
df6.name = "AA_count"
df7 = pd.merge(df5,df6,on="sequence_alignment_aa")
#The step above yields the unique protein count
df7 = df7.loc[:,["clonotype","cluster_id","cdr3_aa","Cluster_count","AA_count","DNA_count","v_call","j_call","sequence_alignment_aa","sequence"]]
df7.columns = ["Clonotype_ID","Cluster_ID","CDR3_AA","Cluster_count","AA_count","DNA_count","V","J","AA_seq","DNA_seq"]
df7.Cluster_ID = df7.Cluster_ID.map(lambda x:x.split(' ')[1])
#One row per unique protein, highest AA_count first
df8 = df7.groupby("AA_seq",as_index=False).max()
df8 = df8.loc[:,["Clonotype_ID","Cluster_ID","CDR3_AA","Cluster_count","AA_count","DNA_count","V","J","AA_seq","DNA_seq"]]
df8 = df8.sort_values("AA_count",ascending=False)
#One representative (best-supported) row per clonotype
df9 = df7.sort_values(["Cluster_count","AA_count","DNA_count"],ascending=False)
df9 = df9.drop_duplicates(subset="Clonotype_ID", keep='first')
#Write the three summary sheets
with pd.ExcelWriter("result.xlsx") as writer:
    df7.to_excel(writer, sheet_name="all",index=False)
    df8.to_excel(writer, sheet_name="AA_count",index=False)
    df9.to_excel(writer, sheet_name="Cluster_count",index=False)

7.脚本

#Files with the "sort" suffix are igblast results; run the pipeline script on each
ls |grep "sort" | while read id ;do bash /public/home/djs/**/NGS.sh $(pwd) ${id};done
#NGS.sh
#!/usr/bin/bash
# Per-sample driver: filter the igblast output (NGS_1.py), cluster CDR3s with
# cd-hit, then build the count tables (NGS_2.py).
# $1 = working directory, $2 = igblast result file.

file=${2}
dir=${1}

# Filter to productive/complete records -> ${dir}/test1.txt
/usr/bin/python3 /public/home/djs/**/NGS_1.py ${dir} ${file}

# Keep only the columns needed downstream (ids, V/J calls, sequences, CDR3)
cut -f 2,3,11,13,16,49 ${dir}/test1.txt > ${dir}/test2.txt

# Fasta of CDR3 amino-acid sequences (column 49), one record per contig id
awk 'BEGIN{FS="\t"} NR > 1 {print ">"$2;print $49}' ${dir}/test1.txt  > ${dir}/cluster.fa

# Cluster CDR3s at 80% identity (word size 3, tight length cutoff)
~/software/cd-hit-v4.8.1-2019-0228/cd-hit -i ${dir}/cluster.fa -o ${dir}/cluster.test -g 1 -c 0.8 -n 3 -d 0 -l 3  -s 0.99 -S 0 -U 0 -sc 1

# Flatten the .clstr file to "cluster_id<TAB>sequence_id" rows
cat ${dir}/cluster.test.clstr |sed 's/.*>/>/g'|sed 's/\..*//g' |tr -d '\n' | sed 's/>C/\nC/g' | awk 'BEGIN{FS=">"} {for(i=2;i<=NF;i++){print $1"\t"$i}}' > ${dir}/test.cluster.txt

# Prepend the header row
sed -i '1i cluster_id\tsequence_id' ${dir}/test.cluster.txt

# Merge annotations with clusters and write the Excel summary
/usr/bin/python3 /public/home/djs/**/NGS_2.py ${dir} ${file}

#NGS_1.py
#!/usr/bin/python3
# NGS_1.py - keep only productive, fully assembled V(D)J records from an
# igblast AIRR table and write them, sorted by V/J call, to test1.txt.
#
# Usage: NGS_1.py <working_dir> <airr_tsv>

import sys
import os
import numpy as np
import pandas as pd

work_dir, airr_path = sys.argv[1], sys.argv[2]
os.chdir(work_dir)

table = pd.read_csv(airr_path, sep='\t')
# All filter columns hold 'T'/'F' flags in the AIRR-format igblast output.
wanted = (
    (table["stop_codon"] == 'F')
    & (table["vj_in_frame"] == 'T')
    & (table["v_frameshift"] == 'F')
    & (table["productive"] == 'T')
    & (table["complete_vdj"] == 'T')
)
table[wanted].sort_values(["v_call", "j_call"]).to_csv("test1.txt", sep="\t")

#NGS_2.py
#!/usr/bin/python3
# NGS_2.py - aggregate igblast annotations and cd-hit clusters into
# clonotype / unique-protein / unique-DNA count tables.
#
# Usage: NGS_2.py <working_dir> <sample_file>
# Reads test2.txt (condensed igblast AIRR columns) and test.cluster.txt
# (cluster_id <-> sequence_id map) from <working_dir>; writes
# <sample_file>.result.xlsx with sheets "all", "AA_count", "Cluster_count".

import sys
import os
import numpy as np
import pandas as pd

Dir = sys.argv[1]
File = sys.argv[2]
os.chdir(Dir)

df1 = pd.read_csv("test2.txt",sep="\t")          # condensed igblast result
df2 = pd.read_csv("test.cluster.txt",sep="\t")   # CDR3 cluster membership
df3 = pd.merge(df1,df2,on="sequence_id")
# Contigs were named "<rank>+<count>" upstream, so the DNA read count is
# recovered from the part of the sequence id after '+'.
df3["DNA_count"] = df3["sequence_id"].map(lambda x:x.split('+')[1])
df3.DNA_count = df3.DNA_count.astype("int64")
#df3.DNA_count.dtype
# Clonotype = same V & J germline reference + same CDR3 cluster (cd-hit
# clusters enforce equal CDR3 length and >=80% identity).
df4 = df3.groupby(["v_call","j_call","cluster_id"]).DNA_count.sum()
df4.name = "Cluster_count"
df5 = pd.merge(df3,df4,on=["v_call","j_call","cluster_id"])
# The same cluster id may occur with different V/J references (clusters are
# CDR3-only), so the clonotype key embeds cluster id, V gene and J gene.
# Cluster_count itself is already counted per (V, J, cluster) triple.
df5["clonotype"] = df5["cluster_id"].map(lambda x:x.split(" ")[1])+"@"+df5["v_call"].map(lambda x:x.split("*")[0])+"@"+df5["j_call"].map(lambda x:x.split("*")[0])

# Unique protein count: total DNA reads per amino-acid sequence.
df6 = df3.groupby("sequence_alignment_aa").agg("sum")["DNA_count"]
df6.name = "AA_count"
df7 = pd.merge(df5,df6,on="sequence_alignment_aa")

df7 = df7.loc[:,["clonotype","cluster_id","cdr3_aa","Cluster_count","AA_count","DNA_count","v_call","j_call","sequence_alignment_aa","sequence"]]
df7.columns = ["Clonotype_ID","Cluster_ID","CDR3_AA","Cluster_count","AA_count","DNA_count","V","J","AA_seq","DNA_seq"]
df7.Cluster_ID = df7.Cluster_ID.map(lambda x:x.split(' ')[1])
#df7.to_csv(File+"result.txt",sep="\t")
#df7.to_excel(File+".all_result.xlsx",index=False,sheet_name="sheet1")

# One row per unique protein, highest AA_count first.
df8 = df7.groupby("AA_seq",as_index=False).max()
df8 = df8.loc[:,["Clonotype_ID","Cluster_ID","CDR3_AA","Cluster_count","AA_count","DNA_count","V","J","AA_seq","DNA_seq"]]
df8 = df8.sort_values("AA_count",ascending=False)

# One representative (best-supported) row per clonotype.
df9 = df7.sort_values(["Cluster_count","AA_count","DNA_count"],ascending=False)
df9 = df9.drop_duplicates(subset="Clonotype_ID", keep='first')

with pd.ExcelWriter(File+".result.xlsx") as writer:
    df7.to_excel(writer, sheet_name="all",index=False)
    df8.to_excel(writer, sheet_name="AA_count",index=False)
    # BUG FIX: this line was indented with a tab while the lines above use
    # four spaces, which raises TabError under Python 3.
    df9.to_excel(writer, sheet_name="Cluster_count",index=False)

8.最后

如果觉得对你有帮助,可以请作者喝杯咖啡。
加糖的咖啡

评论 7
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值