pandas

import pandas as pd
"""
demo1
"""
# Step 1: inspect the pre-sampling data (label distribution before iteration 3)
# before_add = pd.read_csv("111", sep="\t")
# print("# 迭代序号3之前数据分布")
# print(before_add['label'].value_counts())
# new_data = before_add[:12865]
# new_data.to_csv("./222", sep="\t", columns=None, index=None)
# print(new_data['label'].value_counts())

# Step 2: inspect the sampled data (distribution of the 1,000 rows for iteration 3)
# test_2000 = pd.read_csv("test_2000", sep="\t")
# print("# 针对迭代序号3,这1000条数据分布")
# print(test_2000['label'].value_counts())  # light 131  # not_clickbait    869

"""
demo2
"""
# Build a new prediction set: pick out 100 rows, concatenate, and write to a file
# title_data = pd.read_csv("V2.test.content", sep="\t")
# S_data_p = title_data[title_data["label"] == "serious"]
# L_data_p = title_data[title_data["label"] == "light"]
# SL_data_p = pd.concat([S_data_p, L_data_p])[:50]
# N_data_p = title_data[title_data["label"] == "not_clickbait"][100:150]
# p_data = pd.concat([SL_data_p, N_data_p])
# p_data.to_csv("./predict_new.csv", sep="\t", index=None)
"""
demo3
"""
# Step 1: read the two input files (raw strings so the backslashes are not treated as escapes)
# n1 = pd.read_csv(r"E:\腾讯项目\数据分析\label\label_from_mario_202010261.txt", sep="\t")
# n2 = pd.read_csv(r"E:\腾讯项目\数据分析\label\label_from_mario_20201027.txt", sep="\t")
#
# L_data = n1[n1["label"] == "light"]
# N_data = n1[n1["label"] == "not_clickbait"]
# print(L_data['label'].value_counts())
# print(N_data['label'].value_counts())
# Step 2: reorder the columns
# n1_pre_data = pd.concat([L_data, N_data])
# n1_pre_data = n1_pre_data[["cmsid", "label", "media_id", "title", "content", "entropy"]]
# L2_data = n2[n2["label"] == "light"]
# N2_data = n2[n2["label"] == "not_clickbait"]
# N2_data_ = pd.concat([L2_data, N2_data])
# N2_data_ = N2_data_[["cmsid", "label", "media_id", "title", "content", "entropy"]]
# N3_data_ = pd.concat([n1_pre_data, N2_data_])
# print(N3_data_['label'].value_counts())
# print(N3_data_.columns)
# # Step 3: drop the entropy column
# n1_data = N3_data_.drop(["entropy"], axis=1)
# L_pre_data = n1_data[n1_data["label"] == "light"][:272]
# N_pre_data = n1_data[n1_data["label"] == "not_clickbait"][:728]
# n1_pre_data = pd.concat([L_pre_data, N_pre_data])
# n1_pre_data.to_csv("./pre_data.csv", sep="\t", index=None)
#
# L_tr_data = n1_data[n1_data["label"] == "light"][272:]
# N_tr_data = n1_data[n1_data["label"] == "not_clickbait"][728:2344]
# n1_tr_data = pd.concat([L_tr_data, N_tr_data])
# n1_tr_data.to_csv("./tr_data.csv", sep="\t", index=None)
#
# # Step 4: merge the datasets a second time
# LX = pd.read_csv(r"E:\腾讯项目\数据分析\lx", sep="\t")
# S_data = LX[LX["label"] == "serious"]
# L_data = LX[LX["label"] == "light"]
# N_data = LX[LX["label"] == "not_clickbait"]
# # print(LX['label'].value_counts())
# n2_tr_data = pd.concat([LX, n1_tr_data])
# n2_tr_data.to_csv("./all_data.csv", sep="\t", index=None)
# print("=====================================")
# print(n2_tr_data['label'].value_counts())
#
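# The per-label slicing in steps 2-3 can also be written as one groupby
# pass; a minimal sketch on a toy frame, with hypothetical quota numbers:
# import pandas as pd
# df_toy = pd.DataFrame({
#     "label": ["light"] * 5 + ["not_clickbait"] * 5,
#     "title": [f"t{i}" for i in range(10)],
# })
# quotas = {"light": 3, "not_clickbait": 2}  # rows to keep per label
# subset = pd.concat(g.head(quotas.get(name, 0)) for name, g in df_toy.groupby("label"))
# print(subset["label"].value_counts())  # light 3, not_clickbait 2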
""""
demo4
"""
# # Deduplicate
# tr_data = pd.read_csv("tr_data.csv", sep="\t")
# print(tr_data.columns)
# tr_data.drop_duplicates(subset=['cmsid'],keep='first',inplace=True)
# print(tr_data["label"].value_counts())
# print("=====================================")
# all_data = pd.read_csv("all_data.csv", sep="\t")
# print(all_data["label"].value_counts())
# k = all_data.drop_duplicates(subset=['cmsid'],keep='first')
# print("======================================")
# print(k["label"].value_counts())

""""
demo5
"""
# Drop rows with missing values
tr_data = pd.read_csv("train_4", sep="\t")
tr_data1 = tr_data.dropna()
tr_data1.to_csv("train_04", sep="\t", index=None)
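# dropna() above removes any row containing a NaN; a hedged sketch of
# the narrower variants, in case only certain columns should matter:
# import pandas as pd
# df_toy = pd.DataFrame({"title": ["t1", None, "t3"], "label": ["a", "b", None]})
# print(df_toy.dropna())                  # drops rows 1 and 2
# print(df_toy.dropna(subset=["title"]))  # only requires title to be present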

""""
demo6
"""
# t = open("V2.test.content", encoding="utf-8")
# g = open("V1", encoding="utf-8")
# tr_data_ = t.readlines()
# gr_data_ = g.readlines()
# print(len(tr_data_))
# print(type(tr_data_))
# with open("./dt", "w", encoding='utf-8') as f:
#     for i in range(len(tr_data_)):
#         if i == 0:
#             f.write(tr_data_[0])
#             continue
#         k_ = tr_data_[i*4-3:i*4+1]
#         g_ = gr_data_[i*4-3:i*4+1]
#         # if i == 5:
#         #     print(k_)
#         k = "".join(k_)
#         g = "".join(g_)
#         f.write(k)
#         f.write(g)
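# The same four-line interleave on in-memory lists, assuming each
# record after the header spans exactly four lines (toy data):
# def chunks(lines, size=4):
#     return [lines[i:i + size] for i in range(0, len(lines), size)]
# a = ["header\n"] + [f"a{i}\n" for i in range(8)]
# b = ["header\n"] + [f"b{i}\n" for i in range(8)]
# out = [a[0]]
# for ka, kb in zip(chunks(a[1:]), chunks(b[1:])):
#     out.extend(ka)
#     out.extend(kb)
# print("".join(out))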

"""demo7 打印正负标签比例
"""

# import pandas as pd
# from collections import Counter
#
# train_data = pd.read_csv("V2.test.content", sep="\t")
#
# print(train_data[:5])
# # v = Counter(train_data['label'].values)
# # print(type(v))
# # print positive/negative label ratios
# print(dict(Counter(train_data['label'].values)))
# train_data = train_data.values.tolist()
# print(train_data[:10])
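# value_counts(normalize=True) gives the ratio directly, without
# going through Counter (toy frame for illustration):
# import pandas as pd
# df_toy = pd.DataFrame({"label": ["pos", "pos", "neg", "pos"]})
# print(df_toy["label"].value_counts(normalize=True))  # pos 0.75, neg 0.25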

"""demo8 iloc
"""
# import pandas as pd
# c = [2,5,7]
# df = pd.read_csv("V1",sep="\t")
# df = df.iloc[c]  # or inline the positions: df = df.iloc[[2, 5, 7]]
# df.to_csv("0100",index=None,sep="\t")

"""demo9 loc
"""
# import pandas as pd
# df = pd.read_csv("robot_sql_result_20210108210552.txt", sep="\t")
# print(df.columns)
# dk = df.loc[:,"info"]
# dk.to_csv("robot_sql", index=None, sep="\t")
# print(dk.head())
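# A toy contrast of the two selectors from demo8 and demo9:
# iloc is positional, loc is label-based.
# import pandas as pd
# df_toy = pd.DataFrame({"info": ["a", "b", "c"], "score": [1, 2, 3]})
# print(df_toy.iloc[[0, 2]])     # rows at positions 0 and 2
# print(df_toy.loc[:, "info"])   # the "info" column by label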

"""demo10
"""
# import json
# f = open("robot_sql_result_20210108210552.txt",encoding='utf-8')
# g = open("robot_sql_r.txt", "w", encoding='utf-8')
# data = f.readlines()
# for i in range(len(data)):
#     if i == 0:
#         g.write("al_time\t" +
#                 "dcf_time\t" +
#                 "train_time\t" +
#                 "al_sample_time\t" +
#                 "al_preproc_time\t" +
#                 "train_extract_time\t" +
#                 "train_preproc_time\t" +
#                 "train_forecast_time\n")
#         continue
#     l = data[i].split("\t")
#     a = json.loads(l[-1])
#     print(a['time'])
#     al_time = a['time']["al_end"]-a['time']["al_start"]
#     dcf_time = a['time']["dcf_finish"]-a['time']["dcf_create"]
#     train_time = a['time']["train_end"]-a['time']["train_start"]
#     al_sample_time = a['time']["al_sample_end"]-a['time']["al_sample_start"]
#     al_preproc_time = a['time']["al_preproc_end"]-a['time']["al_preproc_start"]
#     train_extract_time = a['time']["train_extract_end"]-a['time']["train_extract_start"]
#     train_preproc_time = a['time']["train_preproc_end"]-a['time']["train_preproc_start"]
#     train_forecast_time = a['time']["train_forecast_end"]-a['time']["train_forecast_start"]
#     # print(al_time)
#     # print(dcf_time)
#     g.write(str(al_time) + "\t" +
#             str(dcf_time) + "\t" +
#             str(train_time) + "\t" +
#             str(al_sample_time) + "\t" +
#             str(al_preproc_time) + "\t" +
#             str(train_extract_time) + "\t" +
#             str(train_preproc_time) + "\t" +
#             str(train_forecast_time) + "\n")
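# A compact variant of the same delta computation, assuming every
# timing field comes as a matching "<name>_start"/"<name>_end" pair
# (note dcf uses create/finish above, so it would need special-casing):
# import json
# line = '{"time": {"al_start": 1, "al_end": 4, "train_start": 2, "train_end": 9}}'
# t = json.loads(line)["time"]
# deltas = {k[:-6]: t[k[:-6] + "_end"] - t[k] for k in t if k.endswith("_start")}
# print(deltas)  # {'al': 3, 'train': 7}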
"""demo11 robot项目
"""
import numpy as np
# tr_data = pd.read_csv("202011_.txt", sep="\t")
# print(tr_data.shape)
# #(599011, 4)
# print(tr_data.columns)
#Index(['business_id', 'json_extract(data,'$.extra.media_id')',
# 'json_extract(data,'$.extra.title')',
# 'json_extract(data,'$.extra.content_html')'],
# dtype='object')
# # # Step 1: deduplicate
# tr_data.drop_duplicates(subset=['business_id'],keep='first',inplace=True)
# print(tr_data.shape)    #(420877, 4)
# # # Step 2: drop rows with missing values
# tr_data = tr_data.dropna()
# print(tr_data.shape)    #(420843, 4)
# # tr_data.to_csv("202011.txt", sep="\t", index=None)
# # print("=====================================")
# # Step 3: rename the columns
# tr_data.columns=['cmsid', 'media_id', 'title', 'content']
# # tr_data_ = tr_data['media_id'].apply(lambda x: x if re.search("^\d+$",str(x)) else np.nan)
# # tr_data = tr_data.dropna()
# # print(tr_data.shape)
# # Step 4: drop rows where 'media_id' is not numeric
# df = tr_data[pd.to_numeric(tr_data['media_id'], errors='coerce').notnull()]
# # print(df.shape)
# df.to_csv("20210103", sep="\t", index=None)
# df = pd.read_csv("20210103",sep="\t")
# df = df.iloc[:5000]
# df.to_csv("20210112",index=None,sep="\t")

"""demo12 替换
"""
# import pandas as pd
# df = pd.read_csv("./dt_new_2",sep="\t")
# # print(df['label'].value_counts())
# df['label'] = df['label'].replace('serious', 'light')  # assign back instead of inplace on a column slice
# df.to_csv("./dt_3",sep="\t")

"""demo13 shuffle
"""
# Method 1:
# import random
# with open("new_dedup","r",encoding="utf-8") as f:
#     data = f.readlines()
#     k = data[0]
#     print(type(k))
#     l = data[1:]
#     print(type(l))
#     random.shuffle(l)
#     str_data = "".join(l)
# with open("new","w",encoding="utf-8") as f:
#     f.write(k)
#     f.write(str_data)
# Method 2:
# import pandas as pd
# df = pd.read_csv("new_dedup", sep="\t")
# df = df.sample(frac=1)
# df.to_csv("00k",sep="\t")
"""demo14 不放回sample
"""
# import pandas as pd
# tr_data = pd.read_csv("./train_random5863", sep="\t") #12863
# new = tr_data.sample(3863, replace=False)
# new.to_csv("train_random3863", sep="\t", index=None)
"""demo15 
"""