Triage Sandbox Monitoring

The Triage sandbox analyzes malware samples for free. It is a state-of-the-art malware analysis sandbox with all the features you would expect.

You can submit large numbers of samples into customizable environments, with detection and configuration extraction for many malware families. Public reports are browsable right away, so you can triage your own malware immediately.

Official site: https://tria.ge/

The following Python code monitors the results published by the sandbox:

import requests
import datetime
from bs4 import BeautifulSoup
import time
import os
import re

user_agent = {
              "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'
             }
#proxies = {'https': '127.0.0.1:7890'}
proxies = {}
write_file = 'triage_ioc.txt'

def get_webcontent(url):

    # Fetch a page and return the parsed BeautifulSoup object, or None on failure.
    try:
        response = requests.get(url, proxies=proxies, headers=user_agent, timeout=10)
        if response.status_code == 200:
            return BeautifulSoup(response.text, "html.parser")
        return None
    except requests.exceptions.RequestException as e:
        print("[ERROR] Failed to get web content:", e)
        return None
            

def parse_sample_class(element):

    # Pull the family/classification tags out of the "tags nano" div markup.
    sample_class = []
    parts = element.split('span class=')
    if len(parts) < 2:
        return sample_class
    for part in parts:
        if part.find('span') != -1:
            sample_class.append(part.split('">')[-1].split('</')[0])
    return sample_class
        
def get_sample_c2(sample_id):

    # Open the sample's report page and extract C2 addresses from the clipboard spans.
    sandbox_url = 'https://tria.ge/' + sample_id
    print(sandbox_url)
    soup = get_webcontent(sandbox_url)
    if soup is None:
        return []
    temp = soup.find_all("span", class_="clipboard")
    regex = re.compile('(?<=title=").*?(?=")')
    c2 = regex.findall(str(temp))
    return c2
   
def download_from_url(url, save_file):

    # Stream a sample download to disk.
    try:
        response = requests.get(url, proxies=proxies, stream=True, timeout=8)
        if response.status_code == 200:
            with open(save_file, "wb") as f:
                for chunk in response.iter_content(chunk_size=8192):
                    f.write(chunk)
    except requests.exceptions.RequestException as e:
        print("Error downloading file: " + save_file, e)
        
def write_to_file(line):

    # Append one IOC (SHA-256 or C2 address) to the output file.
    with open(write_file, 'a', encoding='utf-8') as f:
        f.write(line + '\n')

def parse_triage():

    # Scrape the public reports table and collect IOCs from high-scoring samples.
    soup = get_webcontent('https://tria.ge/reports/public')
    if soup is None:
        return

    createtime = soup.find_all("div", class_="column-created")
    hashes = soup.find_all("div", class_="column-hash")
    filenames = soup.find_all("div", class_="column-target")
    fileclasses = soup.find_all("div", class_="tags nano")
    scores = soup.find_all("div", class_="column-score")
    regex = re.compile('(?<=data-sample-id=").*?(?=")')  # extract sample IDs from data-sample-id="..." attributes
    sample_ids = regex.findall(str(soup))

    i = 0
    while i < len(createtime):

        # Skip samples that are still running or queued.
        if str(scores[i]).find('Running') != -1 or str(scores[i]).find('Submission') != -1:
            i = i + 1
            continue

        create_time = str(createtime[i]).split('">')[-1].split('</')[0]
        print(create_time)

        file_name = str(filenames[i]).split('title="')[-1].split('">')[0]
        print(file_name)

        sha256 = str(hashes[i]).split('clipboard="')[-1].split('"')[0]
        if sha256.find('<div class=') == -1:
            print(sha256)
        else:
            print("")

        file_class = parse_sample_class(str(fileclasses[i]))
        print(file_class)

        sandbox_score = str(scores[i]).split('">')[-1].split('</')[0]
        print(sandbox_score)

        print(sample_ids[i])

        # Only keep IOCs from samples with a sandbox score of 8 or higher.
        if sandbox_score != '' and int(sandbox_score) >= 8:
            c2 = get_sample_c2(sample_ids[i])
            print(c2)
            if len(sha256) == 64:
                write_to_file(sha256)
            if c2 != [] and len(c2) < 5:
                for domain in c2:
                    write_to_file(domain)

        # Optionally download the sample itself:
        #if len(sha256) == 64:
        #    print('Download sample:', sha256)
        #    download_url = 'https://tria.ge/samples/' + sample_ids[i] + '/sample.zip'
        #    save_file = './sample/' + sha256
        #    download_from_url(download_url, save_file)

        time.sleep(10)

        print('--------------------------------------------------------------------------------------------')

        i = i + 1

        
if __name__ == "__main__":

    if not os.path.exists('sample'):
        os.makedirs('sample')
        
    while True:
        parse_triage()
        time.sleep(300)  # poll the public feed every five minutes
        print(datetime.datetime.now())
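
Once the monitor has run for a while, triage_ioc.txt holds one IOC per line (a SHA-256 hash or a C2 address), appended in collection order and possibly with duplicates across polling cycles. Below is a minimal sketch for deduplicating that file before feeding it into a blocklist or other tooling; the output file name dedup_ioc.txt is just an illustrative choice, not part of the script above.

# Minimal sketch: deduplicate collected IOCs while preserving their original order.
# Assumes the monitor above has written triage_ioc.txt; dedup_ioc.txt is a hypothetical output name.
seen = set()
with open('triage_ioc.txt', encoding='utf-8') as src, \
     open('dedup_ioc.txt', 'w', encoding='utf-8') as dst:
    for line in src:
        ioc = line.strip()
        if ioc and ioc not in seen:
            seen.add(ioc)
            dst.write(ioc + '\n')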