import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import re
%matplotlib inline
# Load the Wikipedia traffic training set; missing view counts become 0.
train = pd.read_csv('train_1.csv')
train = train.fillna(0)
print(train.shape)
train.head()
# train.info() reports a memory footprint of about 609.8+ MB
# Shrink memory: every column after 'Page' holds view counts, so downcast
# each one to the smallest integer dtype that fits its values.
for column in train.columns[1:]:
    # pd.to_numeric(..., downcast='integer') converts to a compact int dtype
    train[column] = pd.to_numeric(train[column], downcast='integer')
train.head()
train.info()
def get_language(page):
    """Return the two-letter language code from a Wikipedia page name.

    Page names look like '..._en.wikipedia.org_...'; the code before
    '.wikipedia.org' is the language. Pages not on a xx.wikipedia.org
    host (e.g. media/meta pages) yield 'na'.
    """
    # BUG FIX: the original pattern misspelled 'wikipedia' as 'wikipadia',
    # so the search never matched and every page was labelled 'na'.
    # Dots are escaped so '.' matches a literal dot only.
    res = re.search(r'([a-z]{2})\.wikipedia\.org', page)
    if res:
        return res.group(1)
    return 'na'
# Tag every row with the language code parsed from its page name,
# then show how many pages fall into each language bucket.
train['lang'] = train['Page'].map(get_language)

from collections import Counter

print(Counter(train.lang))
# Example output: Counter({'en': 24108, 'ja': 20431, 'de': 18547, 'na': 17855, 'fr': 17802, 'zh': 17229, 'ru': 15022, 'es': 14069})
# Split the frame into one sub-frame per language code.
# iloc[:, 0:-1] drops the trailing 'lang' column, leaving 'Page' plus
# the daily view-count columns.  (Replaces eight copy-pasted lines with
# a single data-driven loop; result is identical.)
lang_sets = {}
for code in ['en', 'ja', 'de', 'na', 'fr', 'zh', 'ru', 'es']:
    lang_sets[code] = train[train.lang == code].iloc[:, 0:-1]
# Per language: mean daily views per page
# (column-wise total across all pages, divided by the page count).
sums = {}
for key, subset in lang_sets.items():
    # iloc[:, 1:] skips the 'Page' column so only view counts are summed
    sums[key] = subset.iloc[:, 1:].sum(axis=0) / subset.shape[0]
# X axis: one tick per day column in the series.
days = list(range(sums['en'].shape[0]))

labels = {'en': 'English', 'ja': 'Japanese', 'de': 'German', 'na': 'Media',
          'fr': 'French', 'zh': 'Chinese', 'ru': 'Russian', 'es': 'Spanish'}

# BUG FIX: the original had an unclosed paren in plt.figure(...) and a
# nested duplicated plt.ylabel(plt.ylabel(...)) call — both syntax errors.
fig = plt.figure(figsize=(12, 10))
plt.ylabel('Views per Page')
plt.xlabel('Day')
plt.title('Pages in Different Languages')
for key in sums:
    plt.plot(days, sums[key], label=labels[key])
plt.legend()
# --- View-count history for individual pages ---
def plot_entry(key, idx):
    """Plot the daily view series for row `idx` of lang_sets[key].

    The figure title is the page name, looked up in `train` via the
    sub-frame's original index.
    """
    series = lang_sets[key].iloc[idx, 1:]  # skip the 'Page' column
    plt.figure(figsize=(10, 4))
    plt.plot(days, series)
    plt.xlabel('day')
    plt.ylabel('views')
    plt.title(train.iloc[lang_sets[key].index[idx], 0])
    plt.show()
# Spot-check a spread of English pages, from very popular to long tail.
idx = [1, 5, 10, 50, 100, 250, 500, 750, 1000, 1500, 2000, 3000, 4000, 5000]
for row in idx:
    plot_entry('en', row)
# ...
# --- Ranking pages by total views, per language ---
# For each language: rank its pages by total views over the whole period
# and remember the (train) index of the most-viewed page.
top_pages = {}
for key in lang_sets:
    print(key)
    sum_set = pd.DataFrame(lang_sets[key][['Page']])
    # Sum only the numeric view columns (iloc[:, 1:] skips 'Page';
    # summing the string column breaks on modern pandas).
    sum_set['total'] = lang_sets[key].iloc[:, 1:].sum(axis=1)
    # BUG FIX: original read `sum.set.sort_values` (a NameError) instead
    # of `sum_set.sort_values`.
    sum_set = sum_set.sort_values('total', ascending=False)
    print(sum_set.head(10))
    top_pages[key] = sum_set.index[0]  # index of the #1 page in `train`
    print('\n')
# --- The #1 page for each language ---
# Plot the full daily view series of the most-viewed page per language.
date_cols = train.columns[1:-1]  # drop 'Page' (first) and 'lang' (last)
for key in top_pages:
    plt.figure(figsize=(10, 4))
    series = train.loc[top_pages[key], date_cols]
    plt.plot(days, series)
    plt.xlabel('Days')
    plt.ylabel('Views')
    plt.title(train.loc[top_pages[key], 'Page'])
    plt.show()