-》需求分析(业务性)
-》数据采集(采集的框架和平台处理架构)
-》数据清洗(处理的手段)
-》数据分析(分析的逻辑)
-》结果展示(数据可视化)
二、维度指标分析
-》日期:可以最后统计分析的时候根据日期进行分组,可以建分区表
-》PV:count(url)
-》UV:count(distinct(guid))
-》平均访问时长:
-》每个用户登录都会产生一个session
-》统计每个session平均停留的时间
-》进入页面第一条时间戳记录,最后离开页面的最后一条时间戳记录,求到访问时长
-》按照session进行分组,求到全部的平均访问时长
-》二跳率:
-》一个用户在一个session会话中至少点击访问了大于等于2的页面
		-》求访问页面大于等于2个的会话数,联想到PV
		-》统计PV大于等于2的会话数再除以总的会话数
-》独立IP数:
-》对于IP地址做去重
三、准备工作
-- Use IF NOT EXISTS so the script can be re-run without erroring out.
create database if not exists hive_log;
-》主表
-- Raw click-stream log table, one partition per load.
-- Every field is kept as string because the source is a tab-delimited text file;
-- downstream queries cast as needed.
-- NOTE(review): `date` is a reserved word in newer Hive versions and may need
-- backquoting there; kept as-is because every later query references it by this name.
create table if not exists hive_log_source(
id string,
url string,
referer string,
keyword string,
type string,
guid string,
pageId string,
moduleId string,
linkId string,
attachedInfo string,
sessionId string,
trackerU string,
trackerType string,
ip string,
trackerSrc string,
cookie string,
orderCode string,
trackTime string,
endUserId string,
firstLink string,
sessionViewNo string,
productId string,
curMerchantId string,
provinceId string,
cityId string,
fee string,
edmActivity string,
edmEmail string,
edmJobId string,
ieVersion string,
platform string,
internalKeyword string,
resultSum string,
currentPage string,
linkPosition string,
buttonPosition string
)
partitioned by (date string)
row format delimited fields terminated by '\t'
stored as textfile;
-》加载主表的数据
-- Load one raw log file into its matching partition of the source table.
-- NOTE(review): the partition value '2015082818' looks like yyyyMMddHH (an hourly
-- file) — confirm against the upstream collection job before adding more loads.
load data local inpath '/opt/datas/2015082818' into table hive_log_source partition(date='2015082818');
四、数据清洗
-》访问渠道
-》收藏夹
-》手动输入网址
		-》通过用户分享的链接进入
		-》广告链接
....
-》需要获取的是第一条的记录
-》着陆页面
-》用户进入网站的第一个页面
-》需要获取的是第一条的记录
-》分析同一个session会话中的第一个页面
-》着陆页面之前的地址
-》需要获取的是第一条的记录
创建一个分析业务表,提取我们需要的字段
-- Per-session analysis table: one row per session, combining the session
-- metrics (pv, stay_time, ...) with the landing-page fields extracted from
-- the first record of the session. Partitioned by load date like the source.
create table if not exists session_info(
session_id string ,
guid string ,
trackerU string ,
landing_url string ,
landing_url_ref string ,
user_id string ,
pv string ,
stay_time string ,
min_trackTime string ,
ip string ,
provinceId string
)
partitioned by (date string)
row format delimited fields terminated by '\t' ;
-》提取三个字段,单独建表过滤清洗
trackerU string ,
landing_url string ,
landing_url_ref string ,
-》创建第一张临时表
-- Temp table 1: one row per session with its aggregate metrics.
-- PV is defined in the notes above as count(url) — every page view counts,
-- so distinct must NOT be applied here (count(distinct url) would undercount
-- sessions that revisit the same page and skew the second-jump rate).
create table session_tmp as
select
sessionId session_id,
max(guid) guid,
max(endUserId) user_id,
count(url) pv,
-- stay time in seconds: last timestamp minus first timestamp of the session.
-- NOTE(review): unix_timestamp() assumes trackTime is 'yyyy-MM-dd HH:mm:ss' — confirm.
(unix_timestamp(max(trackTime))-unix_timestamp(min(trackTime))) stay_time,
min(trackTime) min_trackTime,
max(ip) ip,
max(provinceId) provinceId
from hive_log_source
where date='2015082818'
group by sessionId;
-》创建第二张临时表
-- Temp table 2: one row per raw log record, keeping only the fields needed
-- to look up each session's landing page (joined later on session_id and
-- the session's minimum trackTime).
create table track_tmp as
select
sessionId as session_id,
trackTime,
url as landing_url,
referer as landing_url_ref,
trackerU
from hive_log_source
where date='2015082818';
-- Assemble the final per-session rows: join each session's aggregates with
-- the landing-page record, i.e. the log row whose trackTime equals the
-- session's min_trackTime. If several records share that minimal timestamp
-- the join produces duplicates, so every column is wrapped in max() and the
-- result is re-grouped by session to keep exactly one row per session.
insert overwrite table session_info partition(date='2015082818')
select
s.session_id,
max(s.guid),
max(t.trackerU),
max(t.landing_url),
max(t.landing_url_ref),
max(s.user_id),
max(s.pv),
max(s.stay_time),
max(s.min_trackTime),
max(s.ip),
max(s.provinceId)
from session_tmp s
join track_tmp t
on s.session_id = t.session_id
and s.min_trackTime = t.trackTime
group by s.session_id;
-》数据分析
-- Daily summary: PV, UV, logged-in users vs visitors, average stay time,
-- second-jump rate (sessions with pv >= 2 over all sessions) and distinct IPs.
-- Fields loaded from a delimited text file may arrive as empty strings rather
-- than NULL, so user_id must be checked for both; `is not null` alone would
-- count users with an empty user_id as logged in.
create table result as
select
date date,
sum(pv) PV,
count(distinct guid) UV,
count(distinct case when user_id is not null and length(user_id)!=0 then guid else null end) login_user,
count(distinct case when user_id is null or length(user_id)=0 then guid else null end) visitor,
avg(stay_time) avg_time,
(count(case when pv>=2 then session_id else null end)/count(session_id)) second_jump,
count(distinct ip) Ip
from session_info
where date='2015082818'
group by date;
-- Same summary as `result`, originally added to treat empty-string user_ids
-- as visitors. length(user_id) is NULL when user_id is NULL, so the original
-- length()-only checks dropped NULL user_ids from BOTH login_user and visitor;
-- combine the null and empty-string checks so every guid lands in exactly
-- one of the two buckets.
create table result2 as
select
date date,
sum(pv) PV,
count(distinct guid) UV,
count(distinct case when user_id is not null and length(user_id)!=0 then guid else null end) login_user,
count(distinct case when user_id is null or length(user_id)=0 then guid else null end) visitor,
avg(stay_time) avg_time,
(count(case when pv>=2 then session_id else null end)/count(session_id)) second_jump,
count(distinct ip) Ip
from session_info
where date='2015082818'
group by date;