# 我想知道这个程序在初始拓扑计算系统初始全局风险过程中,所需的 path_id、probability 和 cal_edges_impact 数据来源和获取方式。
# 需要读进来图的拓扑。总的来说形成两个实验:实验一是删除边后不更新排名,实验二是每次删除边后都要更新一次排名。
# 实验一 输入:待删除边的 list、所有边的排名、CIA 和影响值(直接用前面得到的即可);exp 值需要查表获取。
import copy
import math
import json
import csv
import os
import sys
from lib import get_allpath as method_get_all_path
from datetime import datetime
from itertools import permutations
import numpy as np
import argparse
def exp_normalize(exp, min_value=0.121090464, max_value=3.88):
    """Min-max normalize exploitability scores in place and return the list.

    The default bounds are the fixed min/max of the exploitability score
    table used by this pipeline; they are now parameters so other score
    ranges can be normalized too (backward compatible).

    Args:
        exp: list of raw exploitability scores (MUTATED in place; callers
            that need the originals pass a deep copy).
        min_value: lower bound of the score scale.
        max_value: upper bound of the score scale.

    Returns:
        The same list, scaled so ``min_value`` maps to 0 and ``max_value``
        maps to 1.  The non-zero lower bound of the table keeps normalized
        scores from collapsing to 0, which would zero out probabilities
        downstream.
    """
    span = max_value - min_value
    for i, value in enumerate(exp):
        exp[i] = (value - min_value) / span
    return exp
# def get_path():
# Nodes = None
# edges = None
# edgesDict = {}
# Numedges = {}
# edgesNum = {}
# with open('../input/nodes.json', 'r') as f:
# Nodes = json.load(f)
# nodes = Nodes['nodes']
# start = Nodes['start']
# end = Nodes['end']
# with open('../input/edges.json', 'r') as f:
# edges = json.load(f)
# edges = edges['edges']
# for edge in edges:
# if edgesDict.get(edge[0]) == None:
# edgesDict[edge[0]] = [edge[1]]
# else:
# edgesDict[edge[0]].append(edge[1])
# edgesDict[end] = []
# myPath = method_get_all_path.GetPath(nodes, edgesDict, start, end)
# Path = myPath.getPath() # 存放全部攻击路径
# edgePath = myPath.tran(Path)
# return edgePath
def get_path(topo_path):
    """Load a topology JSON file and return its attack-graph components.

    Args:
        topo_path: path to a topology JSON with keys ``start``, ``goal``,
            ``hosts`` and ``edges``.

    Returns:
        Tuple ``(start, end, nodes, edges, exp)`` where
        ``start`` is the entry host, ``end`` the host of the first goal
        entry, ``nodes`` the list of host names, ``edges`` the list of
        ``[source, target]`` pairs, and ``exp`` the per-edge ``val.ES``
        exploitability scores with the FIRST one dropped (edge E1 is
        excluded from ranking throughout this script — presumably a
        virtual entry edge; confirm against the topology generator).
    """
    with open(topo_path, 'r') as f:
        topo = json.load(f)
    start = topo['start']
    end = topo['goal'][0]['host']
    nodes = [host['host_name'] for host in topo['hosts']]
    edges = []
    exp = []
    for edge in topo['edges']:
        edges.append([edge['source'], edge['target']])
        exp.append(edge['val']['ES'])
    return start, end, nodes, edges, exp[1:]
def get_path1(start, end, nodes, edges):
    """Enumerate all attack paths from ``start`` to ``end``.

    Builds an adjacency list from the edge pairs and delegates the path
    search to ``lib.get_allpath.GetPath``.

    Args:
        start: entry node name.
        end: goal node name.
        nodes: list of all node names.
        edges: list of ``[source, target]`` pairs.

    Returns:
        All attack paths, converted to edge form by ``GetPath.tran``.
    """
    adjacency = {}
    for edge in edges:
        adjacency.setdefault(edge[0], []).append(edge[1])
    # The goal node has no outgoing edges.
    adjacency[end] = []
    finder = method_get_all_path.GetPath(nodes, adjacency, start, end)
    node_paths = finder.getPath()  # every attack path as a node sequence
    return finder.tran(node_paths)
def get_data():
    """Load the edge ranking and CIA/service impact data from result files.

    Reads ``result/rank.json`` (edge ranking produced by an earlier stage)
    and ``result/tmp/CIA_Service.json``.

    Returns:
        Tuple ``(ranking, C, I, A, service_impact, delete_edges)``:
        ``ranking[k]`` is the 1-based rank of edge ``E{k+2}`` (edge E1 is
        excluded from the ranking array); ``C``/``I``/``A``/``service_impact``
        are the per-edge impact component lists taken from
        ``CIA_Service[2]``; ``delete_edges`` is the ranked edge-ID order
        with the last (lowest-ranked) entry dropped.
    """
    with open("result/rank.json", 'r') as f:
        rank = json.load(f)['ranking']
    # Every ranked edge except the lowest-ranked becomes a deletion candidate.
    delete_edges = [rank[i][0] for i in range(len(rank) - 1)]
    ranking = [0] * (len(rank) - 1)
    for index, item in enumerate(rank):
        # Edge names are "E<k>"; E1 is skipped, so E<k> maps to slot k-2.
        if item[0] != 'E1':
            ranking[int(item[0][1:]) - 2] = index + 1
    with open("result/tmp/CIA_Service.json", 'r') as f:
        cia_service = json.load(f)['CIA_Service']
    C = cia_service[2][0]
    I = cia_service[2][1]
    A = cia_service[2][2]
    service_impact = cia_service[2][3]
    return ranking, C, I, A, service_impact, delete_edges
# else:
# print('top_n out of range.')
# os.system("ps -ef|grep -E 'main.py|expand.py|Weight|cal_metrics.py|Rank|static'|grep -v grep|awk '{print $2}'|xargs kill -9")
# # os.system('ps -ef|grep python3')
# with open('result/exception.csv', 'a', newline='') as f:
# writer = csv.writer(f)
# writer.writerow(['top_n out of range.', datetime.now(), list[i][0], list[i][1], list[i][2]])
# f.close()
def all_paths_conversion(edges, all_paths):
    """Convert node-pair paths to edge-ID paths and node sequences.

    Args:
        edges: list of ``[source, target]`` pairs; the edge at 0-based
            index i is named ``"E{i+1}"``.
        all_paths: attack paths, each a sequence of ``(source, target)``
            pairs.

    Returns:
        Tuple ``(path_id, path_node)``: ``path_id[k]`` is path k as a list
        of edge IDs; ``path_node[k]`` is path k as a node list prefixed
        with the virtual start node ``'V0'``.
    """
    # Index edges once instead of scanning the whole edge list for every
    # hop of every path (the original lookup was O(paths * hops * edges)).
    ids_by_pair = {}
    for i, edge in enumerate(edges):
        ids_by_pair.setdefault((edge[0], edge[1]), []).append("E" + str(i + 1))
    path_id = []
    path_node = []
    for path in all_paths:
        temp_id = []
        temp_path = ['V0']
        for source, target in path:
            temp_path.append(target)
            # A duplicated (source, target) pair contributes one ID per
            # duplicate, exactly like the original linear scan did.
            temp_id.extend(ids_by_pair.get((source, target), []))
        path_id.append(temp_id)
        path_node.append(temp_path)
    return path_id, path_node
def normalize_edges_impact(edges_impact):
    """Shift-and-scale impact values in place when any of them is negative.

    If every value is already non-negative the list is returned untouched;
    otherwise each value becomes ``(v - min) * 0.95 + 0.05`` so the
    smallest maps to 0.05 (never zero).

    Returns:
        The (possibly mutated) input list.
    """
    lowest = min(edges_impact)
    if lowest < 0:
        edges_impact[:] = [(v - lowest) * 0.95 + 0.05 for v in edges_impact]
    return edges_impact
def difference_global_risk(ini_Global_Risk, Global_Risks):
    """Return the step-to-step drop in global risk.

    Element k is the reduction achieved by deletion step k: the previous
    risk level minus ``Global_Risks[k][1]``, starting from the initial
    global risk.
    """
    previous = ini_Global_Risk
    drops = []
    for entry in Global_Risks:
        current = entry[1]
        drops.append(previous - current)
        previous = current
    return drops
def min_max(data):
    """Min-max scale ``data`` to [0, 1] and return it as a numpy array."""
    arr = np.array(data)
    low = arr.min()
    high = arr.max()
    return (arr - low) / (high - low)
def _total_risk(path_id, probability, cal_edges_impact):
    """Sum the risk over all paths: along each path, accumulate the product
    of edge probabilities and add probability * impact for every edge."""
    total = 0
    for path in path_id:
        path_risk = 0
        p = 1
        for edge in path:
            idx = int(edge[1:]) - 1  # "E7" -> index 6
            p *= probability[idx]
            path_risk += p * cal_edges_impact[idx]
        total += path_risk
    return total

def get_risk(path_id, probability, cal_edges_impact, delete_edges):
    """Compute the initial global risk and the residual risk after each deletion.

    Args:
        path_id: attack paths as lists of edge IDs ("E<k>"). MUTATED: every
            path containing a deleted edge is removed, so callers pass a
            deep copy.
        probability: per-edge exploit probability, indexed by edge number - 1.
        cal_edges_impact: per-edge impact, indexed by edge number - 1.
        delete_edges: edge IDs to delete, in order.

    Returns:
        List of ``[label, risk]`` pairs: ``["0", initial_risk]`` followed by
        one entry per processed deletion; stops early once risk reaches 0.
    """
    Global_Risks = [["0", _total_risk(path_id, probability, cal_edges_impact)]]
    for delete_edge in delete_edges:
        # Iterate in reverse so pop() does not shift unvisited indices.
        for i in range(len(path_id) - 1, -1, -1):
            if delete_edge in path_id[i]:
                path_id.pop(i)
        residual = _total_risk(path_id, probability, cal_edges_impact)
        Global_Risks.append([delete_edge, residual])
        if residual == 0:
            break
    return Global_Risks
def probability_save_csv(ranking, exp, nor_exp, probability):
    """Write per-edge likelihood data to result/analysis/risk_data/likehood.csv.

    Rows are ordered by rank; columns are edge, L (probability), nor_exp, exp.

    Args:
        ranking: 1-based rank of edge ``E{index+2}`` at each index.
        exp: raw exploitability scores (same indexing).
        nor_exp: normalized exploitability scores.
        probability: final exploit probabilities.
    """
    rows = [0] * len(ranking)
    for index, rank_value in enumerate(ranking):
        rows[rank_value - 1] = ['E' + str(index + 2),
                                probability[index], nor_exp[index], exp[index]]
    data = [['edge', 'L', 'nor_exp', 'exp']] + rows
    # Create the output directory so a fresh checkout does not crash here.
    os.makedirs("result/analysis/risk_data", exist_ok=True)
    with open("result/analysis/risk_data/likehood.csv", "w", newline="", encoding="utf-8-sig") as f:
        csv.writer(f).writerows(data)
def impact_save_csv(ranking, edges_impact):
    """Write the per-edge impact breakdown to result/analysis/risk_data/impact.csv.

    Reads the component lists from ``result/tmp/CIA_Service.json``. Judging
    by the column headers, ``CIA_Service[0]`` holds the raw C/I/A/FC
    components, ``[1]`` the normalized and ``[2]`` the weight-normalized
    ones — confirm against the producer of that file.

    Args:
        ranking: 1-based rank of edge ``E{index+2}`` at each index.
        edges_impact: final impact value per edge (same indexing).
    """
    with open("result/tmp/CIA_Service.json", 'r') as f:
        CIA_Service = json.load(f)['CIA_Service']
    header = ['edge', 'C', 'I', 'A', 'FC',
              '归一化C', '归一化I', '归一化A', '归一化FC',
              '权重归一化C', '权重归一化I', '权重归一化A', '权重归一化FC', 'impact']
    rows = [0] * len(ranking)
    for index, rank_value in enumerate(ranking):
        row = ['E' + str(index + 2)]
        for i in range(3):
            row += [CIA_Service[i][0][index], CIA_Service[i][1][index],
                    CIA_Service[i][2][index], CIA_Service[i][3][index]]
        row.append(edges_impact[index])
        rows[rank_value - 1] = row
    # Create the output directory so a fresh checkout does not crash here.
    os.makedirs("result/analysis/risk_data", exist_ok=True)
    with open("result/analysis/risk_data/impact.csv", "w", newline="", encoding="utf-8-sig") as f:
        csv.writer(f).writerows([header] + rows)
def each_risk(ranking, probability, edges_impact):
    """Write per-edge risk (impact * likelihood) to result/analysis/risk_data/each_risk.csv.

    Rows are ordered by rank; columns are edge, impact, L, I*L.

    Args:
        ranking: 1-based rank of edge ``E{index+2}`` at each index.
        probability: exploit likelihood per edge (same indexing).
        edges_impact: impact per edge (same indexing).
    """
    rows = [0] * len(ranking)
    for index, rank_value in enumerate(ranking):
        rows[rank_value - 1] = ['E' + str(index + 2),
                                edges_impact[index], probability[index],
                                edges_impact[index] * probability[index]]
    # Create the output directory so a fresh checkout does not crash here.
    os.makedirs("result/analysis/risk_data", exist_ok=True)
    with open("result/analysis/risk_data/each_risk.csv", "w", newline="", encoding="utf-8-sig") as f:
        csv.writer(f).writerows([['edge', 'impact', 'L', 'I*L']] + rows)
def steps_risk(path_id, probability, cal_edges_impact, n=None):
    """Record residual risk for every deletion ORDER of the top-n ranked edges.

    For each permutation of the n highest-ranked edges (from
    ``result/rank.json``) the edges are deleted in that order and the
    residual global risk after every step is written to
    ``result/analysis/risk_data/steps_risk.csv``. Note: ``get_risk`` may
    stop early when risk hits 0, so some rows can be shorter than the
    header (same as the original behavior).

    Args:
        path_id: attack paths as edge-ID lists (deep-copied per permutation).
        probability: per-edge exploit probability.
        cal_edges_impact: per-edge impact.
        n: number of top edges to permute; when None, prompts on stdin
            (preserves the original interactive behavior).
    """
    if n is None:
        n = int(input('对前n个全排序,n='))
    with open("result/rank.json", 'r') as f:
        rank = json.load(f)['ranking']
    top_edges = [rank[i][0] for i in range(n)]
    header = ['steps', '初始风险'] + ['残余风险' + str(i + 1) for i in range(n)]
    data = [header]
    # n! permutations — only feasible for small n.
    for steps in permutations(top_edges):
        result = get_risk(copy.deepcopy(path_id), probability, cal_edges_impact, steps)
        data.append([steps] + [value[1] for value in result])
    # Create the output directory so a fresh checkout does not crash here.
    os.makedirs("result/analysis/risk_data", exist_ok=True)
    with open("result/analysis/risk_data/steps_risk.csv", "w", newline="", encoding="utf-8-sig") as f:
        csv.writer(f).writerows(data)
def depend_risk(path_id, probability, cal_edges_impact):
    """Write a per-edge trace of the path-risk computation to depend_risk.csv.

    For every edge along every path, records the cumulative probability up
    to that edge (L), the edge impact (I) and their product (result),
    mirroring how ``get_risk`` accumulates path risk.

    Args:
        path_id: attack paths as lists of edge IDs ("E<k>").
        probability: per-edge exploit probability, indexed by edge number - 1.
        cal_edges_impact: per-edge impact, indexed by edge number - 1.
    """
    data = [['path_id', 'edge', 'L', 'I', 'result']]
    for index, path in enumerate(path_id):
        cumulative_p = 1
        for edge in path:
            idx = int(edge[1:]) - 1  # "E7" -> index 6
            cumulative_p *= probability[idx]
            impact = cal_edges_impact[idx]
            data.append([index, edge, cumulative_p, impact, cumulative_p * impact])
    # Create the output directory so a fresh checkout does not crash here.
    os.makedirs("result/analysis/risk_data", exist_ok=True)
    with open("result/analysis/risk_data/depend_risk.csv", "w", newline="", encoding="utf-8-sig") as f:
        csv.writer(f).writerows(data)
if __name__ == '__main__':
    # Experiment driver: compute global risk under three likelihood/impact
    # variants for a sequence of edge deletions (without re-ranking between
    # deletions) and dump the results to the output JSON.
    if os.path.exists("result/rank.json"):
        parser = argparse.ArgumentParser(description="一个简单的命令行参数示例")
        parser.add_argument('--likehood', action='store_true', help="如果有这个参数就运行probability_save_csv")
        parser.add_argument('--impact', action='store_true', help="如果有这个参数就运行probability_save_csv")
        parser.add_argument('--steps', action='store_true', help="如果有这个参数就运行steps_risk")
        parser.add_argument('--any_com', type=str, help="如果有这个参数就运行any_com")
        parser.add_argument('topo_file', type=str, help="拓扑文件路径")
        parser.add_argument('risk_file', type=str, help="风险文件路径")
        args = parser.parse_args()
        inputpath = args.topo_file
        outputpath = args.risk_file

        start, end, nodes, edges, exp = get_path(inputpath)
        all_paths = get_path1(start, end, nodes, edges)
        ranking, C, I, A, service_impact, delete_edges = get_data()

        # Three likelihood variants from the normalized exploitability:
        # 1) divided by log2(rank+1), 2) plain, 3) divided by rank.
        # The leading 1 is the probability of the entry edge E1.
        nor_exp = exp_normalize(copy.deepcopy(exp))
        probability1 = [1] + [(nor_exp[i] / math.log2(ranking[i] + 1)) for i in range(len(ranking))]
        probability2 = [1] + nor_exp
        probability3 = [1] + [nor_exp[i] / ranking[i] for i in range(len(ranking))]
        if args.likehood:
            probability_save_csv(ranking, exp, nor_exp, probability3[1:])

        # Matching impact variants; index 0 is the zero-impact entry edge E1.
        edges_impact2 = [0] + [C[i] + I[i] + A[i] + service_impact[i] for i in range(len(C))]
        edges_impact1 = [0] + [(C[i] + I[i] + A[i] + service_impact[i]) / math.log2(ranking[i] + 1) for i in range(len(C))]
        edges_impact3 = [0] + [(C[i] + I[i] + A[i] + service_impact[i]) / ranking[i] for i in range(len(C))]

        # --any_com overrides the ranked deletion order with an explicit,
        # comma-separated edge list (e.g. "E3,E7").
        if args.any_com:
            delete_edges = args.any_com.split(',')
        # path_id and the three risk series are needed unconditionally:
        # they feed --impact/--steps below and the JSON dump at the end.
        path_id, path_node = all_paths_conversion(edges, all_paths)
        risks1 = get_risk(copy.deepcopy(path_id), probability1, edges_impact1, delete_edges)
        risks2 = get_risk(copy.deepcopy(path_id), probability2, edges_impact2, delete_edges)
        risks3 = get_risk(copy.deepcopy(path_id), probability3, edges_impact3, delete_edges)

        if args.impact:
            impact_save_csv(ranking, edges_impact3[1:])
            each_risk(ranking, probability3[1:], edges_impact3[1:])
            depend_risk(copy.deepcopy(path_id), probability3, edges_impact3)
        if args.steps:
            steps_risk(copy.deepcopy(path_id), probability3, edges_impact3)

        out = {'Risk1': risks1, 'Risk2': risks2, 'Risk3': risks3}
        with open(outputpath, 'w') as f:
            json.dump(out, f)
    else:
        print('rank.json do not exists.')
        # Kill every other stage of the pipeline so the experiment stops.
        os.system("ps -ef|grep -E 'main.py|expand.py|Weight|cal_metrics.py|Rank|static'|grep -v grep|awk '{print $2}'|xargs kill -9")
        with open('result/exception.csv', 'a', newline='') as f:
            writer = csv.writer(f)
            # BUG FIX: the original row referenced `list[i][...]` with an
            # undefined `i` (and the shadowed builtin `list`), raising
            # NameError; log only the message and timestamp.
            writer.writerow(['rank.json do not exists.', datetime.now()])