# 2021年电工杯A题 高铁牵引供电系统运行负荷预测
# (2021 Electrician's Cup, Problem A: operating-load forecasting for a
# high-speed-rail traction power supply system)
#
# 论文代码,自用留存,以备核查
# (paper code, kept for personal reference and later auditing)
"""
===============🈵🈵🈵🈵🈵===================
作者:LENOVO
日期:2022年10月08日
=======================================================
"""
import numpy as np
import h5py
import matplotlib.pyplot as plt
import math

# Load the raw traction-power series from the MATLAB file.
feature = h5py.File('P_TPSN.mat')
# print(feature['P_TPSN'].shape)
data_inital = feature['P_TPSN'][:]
data_inital = data_inital.T  # presumably (samples, 1) after transpose — confirm; later code indexes [i][0]
# One x-axis tick per sample: 0 .. N-1.
x_axis_data = list(range(data_inital.shape[0]))
#==============计算特征值=================
def features(data):
    """Extract seven summary features from each power sequence.

    Row layout of the returned array (7 x len(data)):
        0: peak (maximum value)
        1: mean
        2: root mean square
        3: standard deviation
        4: sequence length (number of samples = time span)
        5: consumed energy   (sum of positive samples / 3600)
        6: regenerated energy (sum of |negative samples| / 3600)

    :param data: iterable of numeric sequences (lengths may differ)
    :return: numpy array of shape (7, len(data))
    """
    xulie_feature = np.zeros([7, len(data)])
    for i in range(0, len(data)):
        seq = data[i]
        n = len(seq)
        xulie_feature[0][i] = max(seq)  # peak value
        xulie_feature[4][i] = n         # time span = sample count
        ave_sum = 0
        squre_sum = 0
        consum_sum = 0
        renew_sum = 0
        for v in seq:
            ave_sum += v          # running sum for the mean
            squre_sum += v ** 2   # sum of squares for the RMS
            if v > 0:
                consum_sum += v   # positive power -> consumption
            elif v < 0:
                renew_sum += abs(v)  # negative power -> regeneration
        mean = ave_sum / n
        # BUG FIX: the original accumulated (v - partial_running_mean)**2 in
        # the same pass (wrong mean at every step) and used integer division
        # (norm_sum // n) under the square root, truncating the variance.
        # Compute the deviation sum with the final mean and true division.
        norm_sum = sum((v - mean) ** 2 for v in seq)
        xulie_feature[1][i] = mean                        # mean
        xulie_feature[2][i] = math.sqrt(squre_sum / n)    # RMS
        xulie_feature[3][i] = math.sqrt(norm_sum / n)     # population std dev
        xulie_feature[5][i] = consum_sum / 3600           # consumed energy
        xulie_feature[6][i] = renew_sum / 3600            # regenerated energy
    return xulie_feature
#=====================特征值标准化============================================
def standardization(data):
    """Z-score standardize each column of a 2-D array.

    Every column is shifted by its mean and scaled by its population
    standard deviation: (x - mean) / std.

    :param data: 2-D numpy array (rows = features, columns = sequences)
    :return: array of the same shape, column-wise standardized
    """
    rows, cols = data.shape[0], data.shape[1]
    feature_norm = np.zeros([rows, cols])
    for col in range(0, cols):
        col_total = 0
        for row in range(0, rows):
            col_total += data[row][col]
        col_mean = col_total / rows
        dev_total = 0
        for row in range(0, rows):
            dev_total += (data[row][col] - col_mean) ** 2
        col_std = math.sqrt(dev_total / rows)
        for row in range(0, rows):
            feature_norm[row][col] = (data[row][col] - col_mean) / col_std
    return feature_norm
#=======================特征值的相似性计算==============================
def similarity(lie_A, lie_B):
    """Close-degree similarity of two feature vectors.

    Returns sum(elementwise min) / sum(elementwise max); vectors that are
    close element by element score near 1.

    :param lie_A: 1-D numpy array
    :param lie_B: 1-D numpy array of the same length
    :return: scalar similarity ratio
    """
    min_total = 0
    max_total = 0
    for idx in range(0, lie_A.shape[0]):
        a, b = lie_A[idx], lie_B[idx]
        min_total += min(a, b)
        max_total += max(a, b)
    return min_total / max_total
#================归一化================================
def normalization(data):
    """Min-max normalize each column of a 2-D array to the [0, 1] range.

    :param data: 2-D numpy array (rows = features, columns = sequences)
    :return: array of the same shape, column-wise scaled to [0, 1]
    """
    rows, cols = data.shape[0], data.shape[1]
    norm_ation = np.zeros([rows, cols])
    for col in range(0, cols):
        col_max = max(data[:, col])
        col_min = min(data[:, col])
        span = col_max - col_min
        for row in range(0, rows):
            norm_ation[row][col] = (data[row][col] - col_min) / span
    return norm_ation





#============ no-load baseline: mean of the first seven hours ==============
# The first 25200 samples (7 h at 1 sample/s) are taken as no-load
# operation; their mean absolute power is the idle baseline.
Kong_sum = 0
for evey in range(0,25200):
    Kong_sum += abs(data_inital[evey])
Kong_JunZhi = Kong_sum/25200# no-load mean (the original note says ~31.3 kW — TODO confirm units)
# ==============
win_distance = 20# window width: number of samples averaged per sliding window
All_pow_xulie = []# all extracted sequences whose window mean exceeds 1.5x the no-load level
All_index_xulie = []# time indices of every extracted sequence
Pow_xulie = []# sequence currently being accumulated (above the 1.5x threshold)
index_xulie = []# indices of the current sequence
all_win_Junzhi =[]# every window mean; lets us detect the falling edge (previous window above, current below)
for i in x_axis_data:
    if  i <86400-win_distance:
        # Mean absolute power over the window [i, i + win_distance).
        One_win_sum = 0
        for j in range(0, win_distance):
            One_win_sum += abs(data_inital[i + j])
        win_JunZhi = One_win_sum / win_distance
        all_win_Junzhi.append(win_JunZhi[0])
        if win_JunZhi >= 1.5 * Kong_JunZhi:
            # Window is "loaded": record the sample at the window centre.
            Pow_xulie.append(data_inital[i + (win_distance // 2)][0])
            index_xulie.append(x_axis_data[i + (win_distance // 2)])
        elif i>1 and all_win_Junzhi[i-1]>=1.5 * Kong_JunZhi and win_JunZhi<1.5* Kong_JunZhi :
            # Falling edge: previous window was loaded and this one is not —
            # close the current sequence and start collecting a new one.
            All_pow_xulie.append(Pow_xulie)
            All_index_xulie.append(index_xulie)
            Pow_xulie = []
            index_xulie =[]
#=========== keep only the screened power sequences longer than 200 samples ==========
power_data = []
index_data = []
for seq_pow, seq_idx in zip(All_pow_xulie, All_index_xulie):
    if len(seq_pow) > 200:
        power_data.append(seq_pow)
        index_data.append(seq_idx)
#==========================把所有序列画出来==========================================
# for k in range(0,len(power_data)):
#     # axis_power_data = range(0, len(power_data[k]))
#     plt.plot(index_data[k], power_data[k], 'k-', alpha=0.8, linewidth=1, label='No.3USP8')
#     plt.legend(loc="upper right")
#     plt.xlabel('Time/S')
#     plt.ylabel('Load/w')
#     plt.show()
# =======================================24小时功率图================================
# plt.plot(x_axis_data, data_inital, 'k-',  alpha=0.8, linewidth=1, label='load data')
# plt.legend(loc="upper right")
# plt.xlabel('Time/S')
# plt.ylabel('Load/w')
# plt.show()

# #=================NO3USP8====3车上行不停车8编组=========================
# plt.plot(x_axis_data[26220:26600], data_inital[26220:26600], 'k-',  alpha=0.8, linewidth=1, label='No.3USP8')
# plt.legend(loc="upper right")
# plt.xlabel('Time/S')
# plt.ylabel('Load/w')
# plt.show()

#=============================分类===============================
def categoty(classify):
    """Cluster sequence indices from a pairwise similarity matrix.

    classify[i][j] is the similarity between sequences i and j (the caller
    zeroes the diagonal).  Sequences with similarity above 0.965 are
    considered the same class; neighbour lists are merged one transitive
    step, then duplicate/overlapping classes are pruned.  Returns a list
    of classes, each a list of (float) sequence indices.
    (Spellings "categoty"/"calssify" preserved from the original code.)
    """
    calssify_list = []
    # np.sort / np.argsort of the NEGATED row give values and indices in
    # DESCENDING similarity order; max_min_similar therefore holds
    # -similarity, which is why the threshold below reads "< -0.965".
    max_min_similar = np.zeros([classify.shape[0],classify.shape[1]])
    max_min_argsort= np.zeros([classify.shape[0], classify.shape[1]])
    for i in range(0,classify.shape[0]):
        max_min_similar[i,:] = np.sort(-classify[i,:])
        max_min_argsort[i,:]= np.argsort(-classify[i,:])
    # Per sequence: collect indices whose similarity exceeds 0.965.
    for k in range(0,classify.shape[0]):
        calssify_row = []
        for j in range(0,classify.shape[1]):
            if max_min_similar[k][j]<-0.965:
                calssify_row.append(max_min_argsort[k][j])
        calssify_list.append(calssify_row)
    # One transitive merge pass: union each row with the rows of all its
    # neighbours, de-duplicating via set().
    calssify_done = []
    for l in calssify_list:
        calssify_CF = []+l
        for m in range(0,len(l)):
            hang =int(l[m])
            calssify_CF=calssify_CF+calssify_list[hang]
        calssify_CF = list(set(calssify_CF))
        calssify_done.append(calssify_CF)
    for s in range(0,len(calssify_done)):
        calssify_done[s].sort()
    # Prune: drop exact duplicates; of two classes sharing the same first
    # or last element, keep only the larger one.
    for p in range(0,len(calssify_done)):
        for r in range(0,len(calssify_done)):
            if p!=r and calssify_done[p]!=[] and calssify_done[r]!=[]:
                if calssify_done[p]==calssify_done[r]:
                    calssify_done[r]=[]
                elif calssify_done[p][0]==calssify_done[r][0] or calssify_done[p][-1]==calssify_done[r][-1]:
                    if len(calssify_done[p])<len(calssify_done[r]):
                        calssify_done[p] = []
                    else:
                        calssify_done[r]=[]
    # Return only the surviving, non-empty classes.
    classify_return = []
    for w in range(0,len(calssify_done)):
        if calssify_done[w] !=[]:
            classify_return.append(calssify_done[w])
    return classify_return
# ======================================数据长度补一致============================
def all_len(len_data):
    """Zero-pad every sequence to the length of the longest one.

    Padding is split evenly between head and tail; when the shortfall is
    odd, the single extra zero goes to the tail.  Sequences are padded
    in place (the input lists are mutated) and also collected into the
    returned list.

    :param len_data: list of lists of numbers
    :return: list of the (now equal-length) sequences
    """
    # First pass: find the maximum sequence length.
    target = 0
    for seq in len_data:
        if len(seq) > target:
            target = len(seq)
    # Second pass: pad each sequence up to that length.
    back_data = []
    for seq in len_data:
        shortfall = target - len(seq)
        if shortfall != 0:
            for _ in range(shortfall // 2):
                seq.insert(0, 0)
                seq.append(0)
            if shortfall % 2 == 1:
                seq.append(0)  # odd shortfall: extra zero at the tail
        back_data.append(seq)
    return back_data




#====================Main=========================================
features_data = features(power_data)
features_norm = normalization(features_data)
# Pairwise similarity between every pair of sequences (columns of the
# normalized feature matrix); the diagonal is forced to 0 so a sequence
# never matches itself.
similar_array = np.zeros([features_norm.shape[1], features_norm.shape[1]])
for aqi in range(0,features_norm.shape[1]):
    for aqj in range(0,features_norm.shape[1]):
        if aqi != aqj:
            similar_array[aqi][aqj] = similarity(features_norm[:,aqi],features_norm[:,aqj])
        else:
            similar_array[aqi][aqj] = 0
classify_done = categoty(similar_array)
# categoty returns float indices; cast each class to ints for list indexing.
for aqk in range(0,len(classify_done)):
    classify_done[aqk] = list(map(int, classify_done[aqk]))
# Plot two members of class 2 side by side as a visual sanity check.
# NOTE(review): assumes classify_done has at least 3 classes and class 2
# has at least 3 members — will IndexError otherwise.
plt.subplot(1,2,1)
plt.plot(index_data[classify_done[2][1]], power_data[classify_done[2][1]], 'k-', alpha=0.8, linewidth=1)
plt.xlabel('Time/S')
plt.ylabel('Load/w')
plt.subplot(1,2,2)
plt.plot(index_data[classify_done[2][2]], power_data[classify_done[2][2]], 'k-', alpha=0.8, linewidth=1)
plt.xlabel('Time/S')
plt.ylabel('Load/w')
plt.show()
# Gather the raw power sequences of each class for curve fitting.
fitting = []
fitting_similar = []
for anl in range(0,len(classify_done)):
    fitting_1 = []
    for anm in range(0,len(classify_done[anl])):
        fitting_1.append(power_data[classify_done[anl][anm]])
    fitting.append(fitting_1)
# "fitting" holds the data to be fitted.  Per the original notes: eight
# classes in total; the last class has a single raw sequence and is used
# as-is instead of fitted.  Sequences within one class differ in length,
# so each is zero-padded at head and tail to a common length.
for xmi in range(0,len(fitting)):
    fitting_similar.append(all_len(fitting[xmi]))
# fitting_similar now holds equal-length sequences, ready for fitting.
import csv
csv_path = r"D:\1_whit\3_资料\1_学习材料\高铁牵引供电系统典型日负荷预测研究\project\fuheku8.csv"
# Export class index 7 (the eighth class) to CSV, one sequence per row.
with open(csv_path,'w',newline='')as f:
    write = csv.writer(f)
    for row in fitting_similar[7]:
        write.writerow(row)

# ---------------------------------------------------------------------------
# NOTE(review): everything below is residue scraped from the CSDN web page
# (comment counter and "red packet" payment-widget text). It is not part of
# the program; preserved here, commented out, so the file stays runnable.
# ---------------------------------------------------------------------------
# 评论 1
# 添加红包
#
# 请填写红包祝福语或标题
#
# 红包个数最小为10个
#
# 红包金额最低5元
#
# 当前余额3.43前往充值 >
# 需支付:10.00
# 成就一亿技术人!
# 领取后你会自动成为博主和红包主的粉丝 规则
# hope_wisdom
# 发出的红包
# 实付
# 使用余额支付
# 点击重新获取
# 扫码支付
# 钱包余额 0
#
# 抵扣说明:
#
# 1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
# 2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。
#
# 余额充值