import click
import pandas as pd
import os
def _sam_unmapped_to_fasta(sam_path, fasta_path):
    """Write reads with SAM FLAG == 4 (unmapped) from *sam_path* to *fasta_path* as FASTA.

    The read id (column 1) and sequence (column 10) of each unmapped record are
    emitted as one ``>id\\nseq`` entry.  Header lines (starting with ``@``) are skipped.
    """
    with open(fasta_path, 'w') as output_read:
        with open(sam_path, 'r', encoding='utf-8') as sam_f:
            line = sam_f.readline()
            # Skip the '@'-prefixed SAM header block.
            while line and line[0] == '@':
                line = sam_f.readline()
            while line:
                part = line.split("\t")
                if part[1] == "4":  # FLAG 4: read unmapped
                    # Do NOT append /1 to the read id.
                    output_read.write(">%s\n%s\n" % (part[0], part[9]))
                line = sam_f.readline()


@click.command()
@click.option('-r', '--reference', help='accession_file(nucl_gb.accession2taxid/prot.accession2taxid /...)', required=True)
@click.option('-op', '--outputpath', help='outputpath_name(/home/lijing/all_test/test_202112/ /...)', required=True)
@click.option('-id', '--id_of_data', help='data_id_name(K200004870_L01_60 /...)', required=True)
@click.option('-d', '--docker', help='docker_name(823fa78ed660 |...)', required=True)
@click.option('-ga', '--gsnap_aligndatabase',
              help='gsnap_aligndatabase_name(/idseqflow-dockerfile-container-share/20211109/GMAP-GSNAP_data/nt_bacteria_db |...)',
              required=True)
@click.option('-gai', '--gsnap_aligndatabase_index',
              help='gsnap_aligndatabase_index_name(nt_bacteria_k16 | nt_virus_k16 | nt_standard_db |...)',
              required=True)
@click.option('-n', '--inputfile_name', help='inputfile_name(K200004870_L01_60.fq.gz |...)', required=True)
@click.option('-i', '--inputfile', help='raw_data(/home/lijing/nt_nr_database_division1/K200004870_L01_60.fq.gz |...)',
              required=True)
@click.option('-sh', '--star_hostdatabase',
              help='star_hostdatabase_name(/idseqflow-dockerfile-container-share/20211109/star_data/genomeDir |...)',
              required=True)
@click.option('-bh', '--bowtie2_hostdatabase',
              help='bowtie2_hostdatabase_name(/idseqflow-dockerfile-container-share/20211109/bowtie2/Homo_sapiens.GRCh38.dna.primary_assembly |...)',
              required=True)
@click.option('-gh', '--gsnap_hostdatabase',
              help='gsnap_hostdatabase_name(/idseqflow-dockerfile-container-share/20211109/GMAP-GSNAP_data/gsnap |...)',
              required=True)
@click.option('-ghi', '--gsnap_hostdatabase_index',
              help='gsnap_hostdatabase_index_name(Homo_sapiens.GRCh38.dna.primary_assembly_k16 |...)',
              required=True)
def get_hostfilter_qc_align_finalout(reference, outputpath, id_of_data, inputfile, docker,
                                     gsnap_aligndatabase_index, gsnap_aligndatabase, inputfile_name,
                                     star_hostdatabase, bowtie2_hostdatabase, gsnap_hostdatabase,
                                     gsnap_hostdatabase_index):
    """Host-filter, QC, align and summarise one sequencing run inside a docker container.

    Pipeline: copy reads into the container -> STAR host removal -> Trimmomatic
    adapter trimming -> PriceSeqFilter quality filter -> idseq-dedup ->
    fastp low-complexity filter -> bowtie2 host removal -> gsnap host removal ->
    gsnapl alignment against the target database (m8 output) -> taxid/lineage
    annotation via taxonkit -> per-species count table written to
    ``<outputpath>/<id_of_data>_blastn_match_taxid_lineage_add_kgs_delrepeat1_out.txt``.

    NOTE(review): every external step is driven through ``os.system`` with
    %-interpolated, user-supplied paths — shell injection is possible and most
    exit codes are not checked; consider ``subprocess.run([...], shell=False)``.
    """
    # --- host filtering and QC inside the docker container -------------------
    os.system("echo '复制文件进入docker' && docker cp %s %s:/" % (inputfile, docker))
    os.system("echo '解压测序文件' && docker exec -it %s /bin/bash -c 'gunzip /%s' " % (docker, inputfile_name))
    # [:-3] strips the '.gz' suffix — assumes inputfile_name ends in '.gz' (TODO confirm).
    os.system(
        "echo 'star去宿主' && docker exec -it %s /bin/bash -c 'STAR --outFilterMultimapNmax 99999 --outFilterScoreMinOverLread 0.5 --outFilterMatchNminOverLread 0.5 --outReadsUnmapped Fastx --outFilterMismatchNmax 999 --outSAMmode None --clip3pNbases 0 --runThreadN 15 --genomeDir %s --readFilesIn /%s --outFileNamePrefix star_tmp' " % (
            docker, star_hostdatabase, str(inputfile_name)[:-3]))
    os.system("echo '删除fq文件' && docker exec -it %s /bin/bash -c 'rm /%s' " % (docker, str(inputfile_name)[:-3]))
    # Additional vendor (Nuoyin) adapters can be added to the ILLUMINACLIP fasta below.
    os.system(
        "echo 'Trimmomatic去接头' && docker exec -it %s /bin/bash -c 'java -jar /usr/local/bin/trimmomatic-0.38.jar SE -phred33 star_tmpUnmapped.out.mate1 tmp_star_trimmomatic.fastq ILLUMINACLIP:/idseqflow-dockerfile-container-share/20211109/testdata/nuoyin_adapter.fasta:2:30:10:8:true MINLEN:35' " % (
            docker))
    os.system(
        "echo 'PriceSeq去低质量' && docker exec -it %s /bin/bash -c 'PriceSeqFilter -a 12 -rnf 90 -log c -f tmp_star_trimmomatic.fastq -o tmp_star_trimmomatic_priceseq.fastq -rqf 85 0.98' " % (
            docker))
    os.system(
        "echo 'Dedup去重复' && docker exec -it %s /bin/bash -c 'idseq-dedup -l 70 -i tmp_star_trimmomatic_priceseq.fastq -o tmp_star_trimmomatic_priceseq_dedup.fastq' " % (
            docker))
    os.system(
        "echo 'fastp去低复杂' && docker exec -it %s /bin/bash -c 'fastp -y 30 -i tmp_star_trimmomatic_priceseq_dedup.fastq -o tmp_star_trimmomatic_priceseq_dedup_lzw.fastq' " % (
            docker))
    os.system(
        "echo 'bowtie2去宿主' && docker exec -it %s /bin/bash -c 'fastq_to_fasta -i tmp_star_trimmomatic_priceseq_dedup_lzw.fastq -o tmp_star_trimmomatic_priceseq_dedup_lzw.fasta && bowtie2 -q -x %s -f --very-sensitive-local -S tmp_star_trimmomatic_priceseq_dedup_lzw_bowtie2.sam --seed random_seed -p 15 -U tmp_star_trimmomatic_priceseq_dedup_lzw.fasta' && docker cp %s:/tmp_star_trimmomatic_priceseq_dedup_lzw_bowtie2.sam %s/tmp_star_trimmomatic_priceseq_dedup_lzw_bowtie2.sam " % (
            docker, bowtie2_hostdatabase, docker, outputpath))
    # Keep only reads bowtie2 left unmapped (i.e. non-host reads).
    _sam_unmapped_to_fasta('%s/tmp_star_trimmomatic_priceseq_dedup_lzw_bowtie2.sam' % outputpath,
                           '%s/tmp_star_trimmomatic_priceseq_dedup_lzw_bowtie2.fasta' % outputpath)
    os.system(
        "echo 'gsnap去宿主' && docker cp %s/tmp_star_trimmomatic_priceseq_dedup_lzw_bowtie2.fasta %s:/ && docker exec -it %s /bin/bash -c 'gsnap -A sam --batch=0 --use-shared-memory=0 --gmap-mode=all --npaths=1 --ordered -t 32 --max-mismatches=40 -D %s -d %s -o tmp_star_trimmomatic_priceseq_dedup_lzw_bowtie2_gsnap.sam tmp_star_trimmomatic_priceseq_dedup_lzw_bowtie2.fasta' && docker cp %s:/tmp_star_trimmomatic_priceseq_dedup_lzw_bowtie2_gsnap.sam %s/tmp_star_trimmomatic_priceseq_dedup_lzw_bowtie2_gsnap.sam " % (
            outputpath, docker, docker, gsnap_hostdatabase, gsnap_hostdatabase_index, docker, outputpath))
    # Keep only reads gsnap also left unmapped.
    _sam_unmapped_to_fasta('%s/tmp_star_trimmomatic_priceseq_dedup_lzw_bowtie2_gsnap.sam' % outputpath,
                           '%s/tmp_star_trimmomatic_priceseq_dedup_lzw_bowtie2_gsnap.fasta' % outputpath)
    # --- alignment of surviving reads against the target database ------------
    os.system(
        "echo '复制文件进入docker进行gsnap比对' && docker cp %s/tmp_star_trimmomatic_priceseq_dedup_lzw_bowtie2_gsnap.fasta %s:/ && docker exec -it %s /bin/bash -c 'gsnapl -A m8 --batch=0 --use-shared-memory=0 --gmap-mode=none --npaths=100 --ordered -t 36 --max-mismatches=40 -D %s -d %s /tmp_star_trimmomatic_priceseq_dedup_lzw_bowtie2_gsnap.fasta > temp_gsnap.m8' && docker cp %s:/temp_gsnap.m8 %s " % (
            outputpath, docker, docker, gsnap_aligndatabase, gsnap_aligndatabase_index, docker, outputpath))
    os.system("echo '比对文件输出' ")
    # --- map accession.version -> taxid using the accession2taxid reference ---
    df_accession = pd.read_csv(reference, sep='\t')
    # Columns 1:3 of an accession2taxid file are 'accession.version' and 'taxid'.
    df_accession1 = df_accession.iloc[:, 1:3]
    accession_to_taxid_dict = dict(zip(df_accession1['accession.version'], df_accession1['taxid']))
    df_search = pd.read_csv('%s/temp_gsnap.m8' % outputpath, sep='\t', header=None)
    # Standard 12-column BLAST m8 layout.
    df_search.columns = ['Query id', 'accession.version', '% identity', 'alignment length', 'mismatches',
                         'gap openings', 'q. start', 'q. end', 's. start', 's. end', 'e-value', 'bit score']
    # A KeyError here means the alignment database and accession2taxid file are out of sync.
    df_search['taxid'] = df_search['accession.version'].apply(lambda x: accession_to_taxid_dict[x])
    df_search.to_csv('%s/tmp_blastn_match_taxid' % outputpath, index=False, sep='\t', header=None)
    df_search['taxid'].to_csv('%s/tmp_blastn_match_taxid_totaxid' % outputpath, index=False, sep='\t', header=None)
    # --- attach kingdom;genus;species lineage with taxonkit -------------------
    val1 = os.system("taxonkit lineage %s/tmp_blastn_match_taxid_totaxid | taxonkit reformat -f '{k};{g};{s}' |cut -f 1,3 > %s/tmp_blastn_match_taxid_totaxid_lineage " % (outputpath, outputpath))
    val2 = os.system('paste %s/tmp_blastn_match_taxid_totaxid_lineage %s/tmp_blastn_match_taxid > %s/tmp_blastn_match_taxid_lineage' % (outputpath, outputpath, outputpath))
    if val1 == 0 and val2 == 0:
        print("taxid匹配上lineage成功")
    else:
        print("taxid匹配上lineage失败")
    df2 = pd.read_csv('%s/tmp_blastn_match_taxid_lineage' % outputpath, sep='\t', header=None)
    # Drop the trailing duplicate taxid column produced by `paste`.
    df3 = df2.iloc[:, 0:df2.shape[1] - 1]
    df3.columns = ['taxid', 'taxnomy_k_g_s', 'Query id', 'accession.version', '% identity', 'alignment length',
                   'mismatches', 'gap openings',
                   'q. start', 'q. end', 's. start', 's. end', 'e-value', 'bit score']
    df4 = df3['taxnomy_k_g_s'].str.split(';', expand=True)
    df4.columns = ['kingdom', 'genus', 'species']
    df5 = pd.concat([df3, df4], axis=1)
    df5.to_csv('%s/tmp_blastn_match_taxid_lineage_add_kgs' % outputpath, index=False, sep='\t')
    df1 = pd.read_csv('%s/tmp_blastn_match_taxid_lineage_add_kgs' % outputpath, sep='\t')
    print(df1['% identity'].value_counts(ascending=False))
    # BUGFIX: the original loop incremented its own loop index to no effect;
    # this vectorised test preserves the output ("unmeet" once if any hit is
    # not strictly above 80% identity, NaN included).
    if (~(df1['% identity'] > 80)).any():
        print("unmeet")
    # --- drop reads whose multiple hits disagree at the genus (first) word ----
    ambiguous = []
    for qid in df1['Query id'].unique().tolist():
        species_hits = df1[df1['Query id'] == qid]['species'].tolist()
        first_word = str(species_hits[0]).split(' ', 2)[0]
        if any(str(s).split(' ', 2)[0] != first_word for s in species_hits[1:]):
            ambiguous.append(qid)
    df1 = df1[~df1['Query id'].isin([str(q) for q in ambiguous])]
    df2 = df1.reset_index(drop=True)
    df2.to_csv('%s/tmp_blastn_match_taxid_lineage_add_kgs_delrepeat' % outputpath, index=None, sep='\t')
    df3 = pd.read_csv('%s/tmp_blastn_match_taxid_lineage_add_kgs_delrepeat' % outputpath, sep='\t')
    # Keep only the first (best-ranked) hit per read.
    df4 = df3.drop_duplicates(subset='Query id').reset_index()
    df4.to_csv('%s/tmp_blastn_match_taxid_lineage_add_kgs_delrepeat1' % outputpath, index=None, sep='\t')
    # --- aggregate to a per-taxid count table ---------------------------------
    df1 = pd.read_csv('%s/tmp_blastn_match_taxid_lineage_add_kgs_delrepeat1' % outputpath, sep='\t')
    df3 = df1['taxid'].value_counts(ascending=False)
    df4 = pd.DataFrame({'taxid': df3.index, 'count': df3.values})
    dict_id_to_taxnomy = dict(zip(df1['taxid'], df1['taxnomy_k_g_s']))
    df4['taxnomy_k_g_s'] = df4['taxid'].apply(lambda x: dict_id_to_taxnomy[x])
    df5 = df4['taxnomy_k_g_s'].str.split(';', expand=True)
    df5.columns = ['kingdom', 'genus', 'species']
    df6 = pd.concat([df4, df5], axis=1)
    del df6['taxnomy_k_g_s']
    df6['type'] = '0'  # '0' = unclassified kingdom
    for i in range(len(df6)):
        kingdom = df6.loc[i, 'kingdom']
        # BUGFIX: the original used substring tests (`kingdom in 'Viruses'`,
        # `in 'fungi'`) — 'Fungi' was never matched against 'fungi', partial
        # strings like 'V' matched spuriously, and a NaN kingdom raised
        # TypeError.  Exact comparison fixes all three.
        if kingdom == 'Viruses':
            df6.loc[i, 'type'] = '病毒'
        elif kingdom == 'Bacteria':
            df6.loc[i, 'type'] = '细菌'
        elif kingdom == 'Fungi':
            df6.loc[i, 'type'] = '真菌'
    df6['Name'] = df6['species']  # Name = species name, level 1 = kingdom
    df6['level 1'] = df6['kingdom']
    df7 = df6.reindex(columns=['Name', 'level 1', 'taxid', 'count', 'kingdom', 'genus', 'species', 'type'])
    df7.to_csv('%s/tmp_blastn_match_taxid_lineage_add_kgs_delrepeat1_out.txt' % outputpath, index=None, sep='\t')
    # --- combine rows with the same Name but different taxids -----------------
    df1 = pd.read_csv('%s/tmp_blastn_match_taxid_lineage_add_kgs_delrepeat1_out.txt' % outputpath, sep='\t')
    # Keep the first row per (Name, level 1); its taxid represents the group.
    df4 = df1.drop(columns=['count']).drop_duplicates(subset=['Name', 'level 1'], keep='first')
    df4.reset_index(drop=True, inplace=True)
    # BUGFIX: replaces chained assignment `df4['count'][i] = ...`
    # (SettingWithCopy — could silently fail to write) with a groupby sum.
    name_counts = df1.groupby('Name')['count'].sum()
    df4['count'] = df4['Name'].map(name_counts)
    df4 = df4[['Name', 'level 1', 'taxid', 'count', 'kingdom', 'genus', 'species', 'type']]
    df4.to_csv('%s/%s_blastn_match_taxid_lineage_add_kgs_delrepeat1_out.txt' % (outputpath, id_of_data),
               index=None, sep='\t')


if __name__ == '__main__':
    get_hostfilter_qc_align_finalout()
# IDseq_flow: hostfilter_qc_align_finalout_20211220
# (web-scrape artifact, kept as comment: 最新推荐文章于 2024-07-29 21:16:24 发布)