Tianchi Competition: Server Fault Prediction

As usual, start by importing the libraries.

import os
import random  # used later for the train/validation split

import nltk
# nltk.download('punkt')
import numpy as np
import pandas as pd
from nltk.tokenize import word_tokenize
from gensim.models.doc2vec import Doc2Vec, TaggedDocument

from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, f1_score
from sklearn.model_selection import GridSearchCV

import warnings
warnings.filterwarnings('ignore')

data_dir = '../data'

A problem can come up here: the nltk call below gets stuck because the punkt tokenizer models are missing. The manual fix is described in the list below (a programmatic alternative is sketched right after it).

  • First, go to the nltk download page, find and download the punkt tokenizer models.
  • Unzip the downloaded punkt.zip into the C:/Users/<username>/AppData/Roaming/nltk_data/tokenizers directory.

1. Reading the data

sel_data = pd.read_csv(os.path.join(data_dir, 'raw/preliminary_sel_log_dataset.csv'))
sel_data.sort_values(by=['sn', 'time'], inplace=True)
sel_data.reset_index(drop=True, inplace=True)

# --------------------------------
# Result:
#     sn_list: list of 13705 server names
#     tail_msg_list: list of 13705 strings, one per server, each holding that server's log messages, e.g.
#         ' Drive Slot HDD_L_14_Status | Drive Fault | Asserted. Drive Slot / Bay HDD_L_14_Status | Drive Fault | Asserted. Drive Slot HDD_L_14_Status | Drive Fault | Deasserted. Drive Slot / Bay HDD_L_14_Status | Drive Fault | Deasserted',
# --------------------------------
sn_list = sel_data['sn'].drop_duplicates(keep='first').to_list()   # collect all distinct servers ----> 13705 servers in total
tail_msg_list = ['.'.join(sel_data[sel_data['sn']==i]['msg'].tail(10).to_list()) for i in sn_list]  # take the last ten log messages of each server and join them with '.' into a single string
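The list comprehension above filters the full DataFrame once per server, which gets slow with 13705 servers. A sketch of an equivalent single-pass construction with groupby, assuming sel_data is already sorted by sn and time as above (the _alt results should match sn_list and tail_msg_list):

# one groupby pass; sort=False keeps groups in order of first appearance, matching drop_duplicates(keep='first')
tail_msgs = sel_data.groupby('sn', sort=False)['msg'].apply(lambda s: '.'.join(s.tail(10).to_list()))
sn_list_alt = tail_msgs.index.to_list()
tail_msg_list_alt = tail_msgs.to_list()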

# --------------------------------
# Save:
# write both lists to txt so they can be reloaded later
# at this point the server names and the log messages have been saved separately
# --------------------------------
with open(os.path.join(data_dir, "sn_list.txt"), 'w', encoding='utf-8') as f:
    for sn in sn_list:
        f.write(sn)
        f.write("\n")  # one entry per line

with open(os.path.join(data_dir, "msg_list.txt"), 'w', encoding='utf-8') as f:
    for msg in tail_msg_list:
        f.write(msg)
        f.write("\n")
 
# --------------------------------
# Save the label DataFrame separately
# a server may appear more than once in label:
# duplicates mean the same server failed at different times with different labels
#
# save the last ten log messages before each fault as txt
# each line consists of: log messages + fault label
# the log messages and the fault label are joined with '$' (it has been checked that '$' never appears in the log messages)
# --------------------------------
label = pd.read_csv(os.path.join(data_dir, 'raw/preliminary_train_label_dataset.csv'))
label.sort_values(by=['sn', 'fault_time'], inplace=True)
label.reset_index(drop=True, inplace=True)


# for each fault, keep the 10 log messages preceding it, stored as one string
label_list = []
for i, row in label.iterrows():
    label_list.append('.'.join(sel_data[(sel_data['sn']==row['sn'])&(sel_data['time']<=row['fault_time'])].tail(10)['msg']).lower())
train_label = label['label'].values

with open(os.path.join(data_dir, "log_label_list.txt"), 'w', encoding='utf-8') as f:
    for i in range(len(label_list)):
        log_label = label_list[i] + "$" + str(train_label[i])
        f.write(log_label)
        f.write('\n')

# --------------------------------
# Load the server names, log messages, etc.:
#     sn_list: list of 13705 server names
#     tail_msg_list: list of 13705 strings, one per server, each holding that server's log messages
#               ' Drive Slot HDD_L_14_Status | Drive Fault | Asserted. Drive Slot / Bay HDD_L_14_Status | Drive Fault | Asserted. Drive Slot HDD_L_14_Status | Drive Fault | Deasserted. Drive Slot / Bay HDD_L_14_Status | Drive Fault | Deasserted',
#     tokenized_sent: list of 13705 token lists, one per log string
#               ['drive','slot','hdd_l_14_status', '|','drive','fault','|','asserted','.','drive','slot','/','bay','hdd_l_14_status','|','drive','fault','|','asserted','.','drive','slot','hdd_l_14_status','|','drive','fault','|','deasserted','.','drive','slot','/','bay','hdd_l_14_status','|','drive','fault','|','deasserted'],
# --------------------------------

# read the server name list
sn_list = []
with open(os.path.join(data_dir, "sn_list.txt"), "r", encoding="utf-8") as f:
    for line in f.readlines():
        sn_list.append(line.strip())
# read the log message list
msg_list = []
with open(os.path.join(data_dir, "msg_list.txt"), "r", encoding="utf-8") as f:
    for line in f.readlines():
        msg_list.append(line.strip())

# tokenize each log string
tokenized_sent = [word_tokenize(s.lower()) for s in msg_list]

2. Tokenization and Doc2Vec embeddings

# --------------------------------
# Preparation before embedding: tag each token list with its index
# tagged_data:
#     list of 13705 TaggedDocument objects wrapping tokenized_sent
'''
tagged_data = [
    TaggedDocument(tokenized_sent[0], [0]),
    TaggedDocument(tokenized_sent[1], [1]),
    TaggedDocument(tokenized_sent[2], [2]),
]
'''
# --------------------------------
tagged_data = [TaggedDocument(d, [i]) for i, d in enumerate(tokenized_sent)]

# -------------------------------
# Doc2Vec model:
#   Doc2Vec builds on Word2Vec by adding the notion of a paragraph (document)
#   Word2Vec represents each word with a unique word vector
#   Doc2Vec extends this with an additional paragraph vector,
#   so its expected input format is TaggedDocument: (token list, paragraph tag)
#
# Model parameters:
#   - documents: an iterable of TaggedDocument objects
#   - alpha: learning rate
#   - vector_size: dimensionality of the feature vectors (default 100)
#   - window: maximum distance between the current and the predicted word within a sentence
#   - min_count: words with a frequency below min_count are dropped (default 5)
#
# More on the parameters: https://blog.csdn.net/mpk_no1/article/details/72510655?ops_request_misc=%257B%2522request%255Fid%2522%253A%2522164662087516780265422133%2522%252C%2522scm%2522%253A%252220140713.130102334..%2522%257D&request_id=164662087516780265422133&biz_id=0&utm_medium=distribute.pc_search_result.none-task-blog-2~all~sobaiduend~default-1-72510655.pc_search_result_control_group&utm_term=doc2vec%E5%8F%82%E6%95%B0&spm=1018.2226.3001.4187
# Mathematical background: https://blog.csdn.net/itplus/article/details/37969635
# -------------------------------
model = Doc2Vec(tagged_data, vector_size = 10, window = 2, min_count = 1, epochs = 10)
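A few quick sanity checks on the trained model can be helpful here; a sketch assuming gensim 4.x, where the learned paragraph vectors live in model.dv:

print(len(model.wv))                          # number of distinct tokens in the vocabulary
print(model.dv[0])                            # the 10-dimensional vector learned for paragraph tag 0
vec = model.infer_vector(tokenized_sent[0])   # infer a vector for the same token list
print(model.dv.most_similar([vec], topn=3))   # tags of the most similar training documents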

3. Building the training set

# -------------------------------------
# Read log_label_list.txt
# store the log strings in raw_train
# store the labels in train_labels
# -------------------------------------
raw_train = []
train_labels = []

with open(os.path.join(data_dir, 'log_label_list.txt'), "r", encoding='utf-8') as f:
    for line in f.readlines():
        line = line.strip()
        content = line.split('$')
        raw_train.append(content[0])
        train_labels.append(int(content[1]))
train_tokenized = [word_tokenize(s) for s in raw_train]

# --------------------------------
# Build the training features
# convert each string in raw_train into a document vector and store the vectors in an array
# model.infer_vector()
#   - doc_words: list of tokens
#   - alpha: learning rate
#   - epochs
# infer_vector() uses the model trained on the TaggedDocument input
# to infer a document vector for the token list passed in
# ---------------------------------
train_data = []
for i in range(len(train_labels)):
    train_data.append(model.infer_vector(train_tokenized[i]))

train_features = np.array(train_data)
train_label = np.array(train_labels)
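Note that infer_vector is stochastic, so repeated calls on the same token list return slightly different vectors; passing a larger epochs value usually makes them more stable. A small sketch illustrating this:

v1 = model.infer_vector(train_tokenized[0], epochs=50)
v2 = model.infer_vector(train_tokenized[0], epochs=50)
print(np.linalg.norm(v1 - v2))   # small, but typically not exactly zero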

# save the training set in npy format
np.save(os.path.join(data_dir, 'processed/train_features.npy'), train_features)
np.save(os.path.join(data_dir, 'processed/train_labels.npy'), train_label)

4. Training

def load_data(train_ratio = 0.7):
    random.seed(925) # fix the train/validation split
    
    features = np.load(os.path.join(data_dir, "processed/train_features.npy"))
    labels = np.load(os.path.join(data_dir, "processed/train_labels.npy"))
    X_train = []
    y_train = []
    X_valid = []
    y_valid = []

    for i in range(len(labels)):
        if random.uniform(0, 1) < train_ratio:
            X_train.append(features[i])
            y_train.append(labels[i])
        else:
            X_valid.append(features[i])
            y_valid.append(labels[i])

    return np.array(X_train), np.array(X_valid), np.array(y_train), np.array(y_valid)

X_train, X_valid, y_train, y_valid = load_data()
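The split can also be done with sklearn's train_test_split; a sketch of an alternative, not used in the original code, that additionally stratifies on the labels so the class proportions stay similar in both parts:

from sklearn.model_selection import train_test_split

features = np.load(os.path.join(data_dir, "processed/train_features.npy"))
labels = np.load(os.path.join(data_dir, "processed/train_labels.npy"))
# train_size=0.7 mirrors the ratio in load_data(); random_state fixes the split
X_train, X_valid, y_train, y_valid = train_test_split(
    features, labels, train_size=0.7, random_state=925, stratify=labels)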

# Random forest ===============================
params = {'n_estimators':[20,40,60,80],'max_depth':[12,14,16],
          'criterion':['entropy'],"class_weight":[ 'balanced'],"random_state":[1]}

clf = GridSearchCV(estimator=RandomForestClassifier(), param_grid=params, cv=5, n_jobs=-1, scoring="f1_macro")
clf.fit(X_train, y_train)  # run the grid search
print("Best Params:{}".format(clf.best_params_))

rdf = RandomForestClassifier(n_estimators=clf.best_params_['n_estimators'], criterion="entropy", random_state=1, max_depth=clf.best_params_['max_depth'], class_weight="balanced")
rdf.fit(X_train, y_train)
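Since GridSearchCV refits the best parameter combination on the full training data by default (refit=True), the manual refit above is optional; the fitted model is already available as:

rdf = clf.best_estimator_   # RandomForestClassifier refitted with clf.best_params_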

def evaluate(clf, X_train, X_test, y_train, y_test):
    predicted = clf.predict(X_train)
    accu = accuracy_score(y_train, predicted)
    print("Training accuracy:", accu)
    f1 = f1_score(y_train, predicted, average="macro")
    print("Training macro-F1:", f1)

    predicted = clf.predict(X_test)
    accu = accuracy_score(y_test, predicted)
    print("Validation accuracy:", accu)
    f1 = f1_score(y_test, predicted, average="macro")
    print("Validation macro-F1:", f1)

evaluate(rdf, X_train, X_valid, y_train, y_valid)
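For a per-class breakdown beyond overall accuracy and macro-F1, sklearn's classification_report can be printed on the validation predictions:

from sklearn.metrics import classification_report

print(classification_report(y_valid, rdf.predict(X_valid), digits=3))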

On the first run, the model reaches an accuracy of about 0.67.
