Pandas 26: Processing and Analyzing a Website's Raw Access Logs

Goal: hands-on practice on a real project, exploring data processing and analysis with Pandas

Example data source: the access logs of the blog http://www.crazyant.net/

Steps:
1. Read, clean, and format the data
2. Compute the share of spider (crawler) traffic and plot it as a bar chart
3. Compute the share of each HTTP status code and plot it as a pie chart
4. Compute hourly and daily PV/UV traffic trends and plot them as line charts

1. Reading, cleaning, and formatting the data

1

import pandas as pd
import numpy as np

# show full column contents; pandas >= 1.0 uses None instead of the old -1
pd.set_option('display.max_colwidth', None)

from pyecharts import options as opts
from pyecharts.charts import Bar,Pie,Line

2

# Read the whole directory and concatenate every file into a single DataFrame
data_dir = "./datas/crazyant/blog_access_log"

df_list = []

import os
for fname in os.listdir(data_dir):
    # error_bad_lines=False was removed in pandas 2.0; on_bad_lines="skip" is the modern spelling
    df_list.append(pd.read_csv(f"{data_dir}/{fname}", sep=" ", header=None, on_bad_lines="skip"))

df = pd.concat(df_list)

3

df.head()

3
(Output: preview of the raw DataFrame, one column per whitespace-separated log field.)

4

df = df[[0, 3, 6, 9]].copy()   # keep IP, timestamp, status code, and user agent
df.head()

4

    0                3                       6     9
0   106.11.153.226   [02/Dec/2019:22:40:18   200   YisouSpider
1   42.156.254.60    [02/Dec/2019:22:40:23   201   Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.81 YisouSpider/5.0 Safari/537.36
2   106.11.159.254   [02/Dec/2019:22:40:27   200   YisouSpider
3   106.11.157.254   [02/Dec/2019:22:40:28   200   YisouSpider
4   42.156.137.109   [02/Dec/2019:22:40:30   201   Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.81 YisouSpider/5.0 Safari/537.36
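
Why columns 0, 3, 6, and 9? The files appear to use the standard nginx/Apache "combined" log format, and read_csv honors double quotes by default, so the quoted request, referer, and user-agent strings each parse as one field. A minimal sketch with a hypothetical sample line (the exact layout is an assumption, not taken from the source data):

import io
import pandas as pd

# Assumed combined-format layout (hypothetical sample line):
#   $remote_addr - $remote_user [$time_local] "$request" $status $bytes "$referer" "$user_agent"
sample = ('106.11.153.226 - - [02/Dec/2019:22:40:18 +0800] '
          '"GET /archives/1234 HTTP/1.1" 200 5678 "-" "YisouSpider"\n')

row = pd.read_csv(io.StringIO(sample), sep=" ", header=None).iloc[0]
print(row[[0, 3, 6, 9]].tolist())
# ['106.11.153.226', '[02/Dec/2019:22:40:18', 200, 'YisouSpider']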

5

df.columns = ["ip", "stime", "status", "client"]
df.head()

5
    ip               stime                   status   client
0   106.11.153.226   [02/Dec/2019:22:40:18   200      YisouSpider
1   42.156.254.60    [02/Dec/2019:22:40:23   201      Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.81 YisouSpider/5.0 Safari/537.36
2   106.11.159.254   [02/Dec/2019:22:40:27   200      YisouSpider
3   106.11.157.254   [02/Dec/2019:22:40:28   200      YisouSpider
4   42.156.137.109   [02/Dec/2019:22:40:30   201      Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.81 YisouSpider/5.0 Safari/537.36

6

df.dtypes

6

ip        object
stime     object
status    int64 
client    object
dtype: object
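
Note that stime is still a plain string (object dtype) at this point; it is parsed into a real datetime in step 4 below.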

2. Computing the share of spider traffic

7

df["is_spider"] = df["client"].str.lower().str.contains("spider")
df.head()

7
    ip               stime                   status   client        is_spider
0   106.11.153.226   [02/Dec/2019:22:40:18   200      YisouSpider   True
1   42.156.254.60    [02/Dec/2019:22:40:23   201      Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.81 YisouSpider/5.0 Safari/537.36   True
2   106.11.159.254   [02/Dec/2019:22:40:27   200      YisouSpider   True
3   106.11.157.254   [02/Dec/2019:22:40:28   200      YisouSpider   True
4   42.156.137.109   [02/Dec/2019:22:40:30   201      Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.81 YisouSpider/5.0 Safari/537.36   True
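
If any row lacks a user-agent field, str.contains propagates NaN instead of returning False. A slightly more defensive variant (an optional tweak, not part of the original code):

# na=False counts rows with a missing client string as "not a spider"
df["is_spider"] = df["client"].str.lower().str.contains("spider", na=False)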

8

df_spider = df["is_spider"].value_counts()
df_spider

8

False    46641
True     3637 
Name: is_spider, dtype: int64
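
From the counts above, spiders account for 3637 / 50278, roughly 7.2% of all requests, which normalizing the counts confirms:

print((df_spider / df_spider.sum()).round(3))
# False    0.928
# True     0.072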

9

bar = (
        Bar()
        .add_xaxis([str(x) for x in df_spider.index])
        .add_yaxis("Is spider", df_spider.values.tolist())
        .set_global_opts(title_opts=opts.TitleOpts(title="Share of spider traffic"))
)
bar.render()

9
(Output: bar chart comparing spider and non-spider request counts.)
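
render() writes a standalone HTML file (render.html by default; pass a path to change it). Inside Jupyter, pyecharts can also display the chart inline:

bar.render_notebook()  # inline display in a Jupyter notebook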

3. Comparing HTTP status code counts

10

df_status = df.groupby("status").size()
df_status

10

status
200    41924
201    3432 
206    70   
301    2364 
302    23   
304    19   
400    20   
403    92   
404    1474 
405    12   
444    846  
500    1    
504    1    
dtype: int64
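
One code worth a note when reading nginx logs: 444 is an nginx-specific status meaning the server closed the connection without sending a response, typically used to drop unwanted requests.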

11

list(zip(df_status.index, df_status))

11

[(200, 41924),
 (201, 3432),
 (206, 70),
 (301, 2364),
 (302, 23),
 (304, 19),
 (400, 20),
 (403, 92),
 (404, 1474),
 (405, 12),
 (444, 846),
 (500, 1),
 (504, 1)]

12

pie = (
        Pie()
        # cast numpy types to plain str/int so pyecharts can serialize the pairs
        .add("Status code share", [(str(k), int(v)) for k, v in zip(df_status.index, df_status)])
        .set_series_opts(label_opts=opts.LabelOpts(formatter="{b}: {c}"))
    )
pie.render()

12
(Output: pie chart of status code shares.)

4. Hourly and daily traffic statistics (PV/UV)

13

df.head()

13
(Output: df.head(), unchanged from the preview above; stime is still a raw string.)

14

df["stime"] = pd.to_datetime(df["stime"].str[1:], format="%d/%b/%Y:%H:%M:%S")
df.head()

14
(Output: df.head() with stime parsed as datetime64[ns].)
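
As a quick sanity check, the format string can be verified against a single sample value (taken from the preview above) before applying it to the whole column:

# "[02/Dec/2019:22:40:18" with the leading "[" stripped
print(pd.to_datetime("02/Dec/2019:22:40:18", format="%d/%b/%Y:%H:%M:%S"))
# 2019-12-02 22:40:18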

15

df.set_index("stime", inplace=True)
df.sort_index(inplace=True)
df.head()

15
(Output: df.head() with the sorted stime DatetimeIndex.)

16

df.index

16

DatetimeIndex(['2019-12-02 22:40:18', '2019-12-02 22:40:23',
               '2019-12-02 22:40:27', '2019-12-02 22:40:28',
               '2019-12-02 22:40:30', '2019-12-02 22:40:46',
               '2019-12-02 22:41:52', '2019-12-02 22:41:52',
               '2019-12-02 22:41:55', '2019-12-02 22:42:16',
               ...
               '2019-12-07 21:30:16', '2019-12-07 21:30:17',
               '2019-12-07 21:30:19', '2019-12-07 21:30:20',
               '2019-12-07 21:30:21', '2019-12-07 21:30:22',
               '2019-12-07 21:30:23', '2019-12-07 21:30:56',
               '2019-12-07 21:30:58', '2019-12-07 21:31:02'],
              dtype='datetime64[ns]', name='stime', length=50278, freq=None)

21

# Per hour ("H"; pandas >= 2.2 prefers the lowercase alias "h")
#df_pvuv = df.resample("H")["ip"].agg(pv=np.size, uv=pd.Series.nunique)

# Per 6 hours
#df_pvuv = df.resample("6H")["ip"].agg(pv=np.size, uv=pd.Series.nunique)

# Per day: pv (page views) counts all requests, uv (unique visitors) counts distinct IPs
df_pvuv = df.resample("D")["ip"].agg(pv=np.size, uv=pd.Series.nunique)

df_pvuv.head()

21

            pv      uv
stime
2019-12-02  288     70
2019-12-03  10285   1180
2019-12-04  13618   1197
2019-12-05  10485   1152
2019-12-06  9469    1261

22

line = (
        Line()
        # stringify the Timestamps so pyecharts can serialize the x-axis
        .add_xaxis([str(d.date()) for d in df_pvuv.index])
        .add_yaxis("PV", df_pvuv["pv"].to_list())
        .add_yaxis("UV", df_pvuv["uv"].to_list())
        .set_global_opts(
            title_opts=opts.TitleOpts(title="PV/UV comparison"),
            tooltip_opts=opts.TooltipOpts(trigger="axis", axis_pointer_type="cross")
        )
    )
line.render()

22
(Output: line chart of daily PV and UV.)
