Implementing Offline Data Analysis

Infrastructure:

Link: setting up the base project
Modify the log4j.properties file (a sketch of the logging side follows after the config):

log4j.rootLogger = info,stdout,flume

log4j.appender.stdout = org.apache.log4j.ConsoleAppender
log4j.appender.stdout.Target = System.out
log4j.appender.stdout.layout = org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern = %m%n

# appender flume avro
log4j.appender.flume = org.apache.flume.clients.log4jappender.Log4jAppender
log4j.appender.flume.Hostname = 192.168.65.161
log4j.appender.flume.Port = 22222
log4j.appender.flume.layout = org.apache.log4j.PatternLayout
log4j.appender.flume.UnsafeMode = true

## appender KAFKA
#log4j.appender.KAFKA=kafka.producer.KafkaLog4jAppender
#log4j.appender.KAFKA.topic=jtlog
#log4j.appender.KAFKA.brokerList=brokerNode1:9091,brokerNode2:9092
#log4j.appender.KAFKA.compressionType=none
#log4j.appender.KAFKA.syncSend=true
#log4j.appender.KAFKA.layout=org.apache.log4j.PatternLayout
#log4j.appender.KAFKA.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L %% - %m%n
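With the flume appender enabled, every Log4j call in the application is forwarded to the Flume Avro source at 192.168.65.161:22222. A minimal sketch of the logging side, assuming the application emits one pipe-delimited record per page view (the class name and message content below are illustrative; the '|' delimiter matches the Hive tables later on):

import org.apache.log4j.Logger;

public class FluxLogDemo {
    // picks up log4j.properties from the classpath, so events also reach the flume appender
    private static final Logger LOG = Logger.getLogger(FluxLogDemo.class);

    public static void main(String[] args) {
        // one pipe-delimited record per event (field values here are placeholders)
        LOG.info("http://demo/index.jsp|index|...");
    }
}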

Link: Flume configuration and operations

Offline Data Processing

Managing Data with Hive

Create an external table named flux to manage the log data stored in HDFS.

-- create the database
create database jtlogdb;
-- use the database
use jtlogdb;
-- create the table
create external table flux (url string,urlname string,title string,chset string,src string,col string,lg string,je string,ec string,fv string,cn string,ref string,uagent string,stat_uv string,stat_ss string,cip string) partitioned by (reportTime string) row format delimited fields terminated by '|' location '/flux';

create external table flux: creates an external table
partitioned by (reportTime string): partitions the data by report date
row format delimited fields terminated by '|': fields are separated by |
location '/flux': the table manages the /flux directory in HDFS (see the verification sketch below)
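After creating the table, its definition and partitions can be verified with a quick check (standard Hive commands; no data needs to be loaded yet):

-- confirm the table is external, partitioned by reportTime, and located at /flux
describe formatted flux;
-- list the partitions Hive currently knows about (empty until partitions are added below)
show partitions flux;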

The raw data contains many fields, but not all of them are relevant to our business. So before formal processing we run a cleaning pass over the flux table to strip out the irrelevant data.

-- add the partition information
alter table flux add partition (reportTime='2020-02-25') location '/flux/reportTime=2020-02-25';
-- query the flux table
select * from flux;

(Screenshot: flux query results after the partition is loaded)

Common Hive Functions

split

-- examples (the | must be escaped because split takes a regular expression)
select split("a|b|c","\\|");
select split("a|b|c","\\|")[0];


select url,urlname,stat_uv,split(stat_ss,"_")[1],cip from flux where url!='';


Data Cleaning

Required data fields

--reportTime report date
--url        visited URL
--urlname    page name
--uvid       visitor id
--ssid       session id
--sscount    session sequence number
--sstime     session timestamp
--cip        visitor IP
-- create the cleaned table
create table dataclear(reportTime string,url string,urlname string,uvid string,ssid string,sscount string,sstime string,cip string) row format delimited fields terminated by '|';
-- load the data
-- Note: when inserting data from one Hive table into another, the two tables' structures, including the field delimiter, must match; otherwise the data may end up misaligned.
insert overwrite table dataclear select reportTime,url,urlname,stat_uv,split(stat_ss,"_")[0],split(stat_ss,"_")[1],split(stat_ss,"_")[2],cip from flux where url!='';
-- query the cleaned table
select * from dataclear;

(Screenshot: dataclear query results)

Data Processing

Basic metrics

-- PV: page views, i.e., hits; in practice this is the number of valid log records
select count(*) as pv from dataclear where reportTime='2020-09-24';

-- UV: unique visitors (the 20-digit random visitor id (uvid), deduplicated and counted)
select count(distinct uvid) as uv from dataclear where reportTime='2020-09-24';

-- SV: unique sessions (called vv in the results table below). The browser stores the session id in a cookie, so each distinct cookie represents a distinct session; here we used two browsers and cleared cookies twice to simulate different sessions.
select count(distinct ssid) as sv from dataclear where reportTime='2020-09-24';

BR: bounce rate

-- disable strict mode (the bounce-rate query below uses a cartesian join)
set hive.mapred.mode=nonstrict;

-- core SQL statement
select br_taba.a/br_tabb.b as br from 
( 
  select count(*) as a from 
    ( 
      select ssid from dataclear 
      where reportTime='2020-09-24'
      group by ssid having count(ssid)=1 
    ) as br_tab
) as br_taba,
(
  select count(distinct ssid) as b from dataclear
   where reportTime='2020-09-24'
) as br_tabb;
-- keep four decimal places
select round(br_taba.a/br_tabb.b,4) as br from 
( 
  select count(*) as a from 
    ( 
      select ssid from dataclear 
      where reportTime='2020-09-24'
      group by ssid having count(ssid)=1 
    ) as br_tab
) as br_taba,
(
  select count(distinct ssid) as b from dataclear
   where reportTime='2020-09-24'
) as br_tabb;

The bounce rate is the number of sessions that visited only one page divided by the total number of sessions.
To control the precision of the result, we apply the round function and keep four decimal places (standard rounding).

NewIP: number of new IPs

select count(distinct dataclear.cip) from dataclear where dataclear.reportTime='2020-09-24' and cip not in (select dc2.cip from dataclear as dc2 where dc2.reportTime<'2020-09-24');

AvgTime: average visit duration

select round(avg(usetime),2) as avgtime from (select max(sstime) - min(sstime) as usetime from dataclear where reportTime='2020-09-24' group by ssid) as atTab;

AvgDeep: average visit depth

select round(avg(deep),2) as avgdeep from (select count(distinct urlname) as deep from flux where reportTime='2020-09-24' group by split(stat_ss,'_')[0]) as tviewdeep;

Analysis results table

-- create the results table
create table tongji(reportTime string,pv int,uv int,vv int, br double,newip int, newcust int, avgtime double,avgdeep double) row format delimited fields terminated by '|';

-- compute the metrics and insert them into the results table
insert overwrite table tongji select '2020-09-24',tab1.pv,tab2.uv,tab3.vv,tab4.br,tab5.newip,tab6.newcust,tab7.avgtime,tab8.avgdeep from
(select count(*) as pv from dataclear where reportTime = '2020-09-24') as tab1, 
(select count(distinct uvid) as uv from dataclear where reportTime = '2020-09-24') as tab2, 
(select count(distinct ssid) as vv from dataclear where reportTime = '2020-09-24') as tab3,
(select round(br_taba.a/br_tabb.b,4)as br from (select count(*) as a from (select ssid from dataclear where reportTime='2020-09-24' group by ssid having count(ssid) = 1) as br_tab) as br_taba, 
(select count(distinct ssid) as b from dataclear where reportTime='2020-09-24') as br_tabb) as tab4, 
(select count(distinct dataclear.cip) as newip from dataclear where dataclear.reportTime = '2020-09-24' and cip not in (select dc2.cip from dataclear as dc2 where dc2.reportTime < '2020-09-24')) as tab5,
(select count(distinct dataclear.uvid) as newcust from dataclear where dataclear.reportTime='2020-09-24' and uvid not in (select dc2.uvid from dataclear as dc2 where dc2.reportTime < '2020-09-24')) as tab6, 
(select round(avg(atTab.usetime),4) as avgtime from (select max(sstime) - min(sstime) as usetime from dataclear where reportTime='2020-09-24' group by ssid) as atTab) as tab7, 
(select round(avg(deep),4) as avgdeep from (select count(distinct urlname) as deep from dataclear where reportTime='2020-09-24' group by ssid) as adTab) as tab8;
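
Once the insert finishes, the day's metrics can be checked directly:

-- the results table holds one row per reportTime
select * from tongji;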

Importing the Data into MySQL with Sqoop

Concept

Sqoop is a bridge between HDFS and relational databases: it can export data from HDFS to a relational database, and import data from a relational database into HDFS.

Download

A tool provided by Apache.

Installation

  1. Requires JDK and Hadoop, and the versions must be compatible.
  2. Upload the archive to the Linux server and extract it.
  3. Sqoop locates the JDK through JAVA_HOME and Hadoop through HADOOP_HOME, so no extra configuration is needed for it to work.
  4. Copy the JDBC driver of the database to be connected into Sqoop's lib directory (a minimal sketch of these steps follows below).
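A minimal sketch of the installation steps above (the archive and driver file names are illustrative and depend on the versions actually downloaded):

# extract the uploaded archive
tar -zxvf sqoop-1.4.7.bin__hadoop-2.6.0.tar.gz
# copy the MySQL JDBC driver into Sqoop's lib directory
cp mysql-connector-java-5.1.38.jar sqoop-1.4.7.bin__hadoop-2.6.0/lib/
# verify that Sqoop finds JAVA_HOME and HADOOP_HOME
cd sqoop-1.4.7.bin__hadoop-2.6.0/bin && ./sqoop version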
Usage

Create the jtlog database in MySQL:

-- create the database
create database jtlog;
-- use the database
use jtlog;
-- create the table
CREATE TABLE jtdata (reportTime varchar(100),pv bigint(20),uv bigint(20),vv bigint(20),br double,newip bigint(20),newcust bigint(20),avgtime double,avgdeep double);

Import data from a relational database into HDFS:

Run the following in Sqoop's bin directory:

./sqoop import --connect jdbc:mysql://192.168.65.1:3306/jtlog --username root --password root --table jtdata -m 1 --target-dir '/sqoop/jtlog' --fields-terminated-by '|';

Export data from HDFS to the relational database:

./sqoop export --connect jdbc:mysql://192.168.65.161:3306/jtlog --username root --password root --export-dir '/user/hive/warehouse/jtlog.db/tongji' --table jtdata -m 1 --fields-terminated-by '|';
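
After the export completes, the result can be verified on the MySQL side:

-- run in the MySQL client
use jtlog;
select * from jtdata;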

Learning and Using ECharts

  1. Download the js file.
  2. Include the js file in the page.
  3. Create a div as the chart container; it must have an explicit width and height and an id.
  4. Initialize the ECharts instance (on the div).
  5. Find a suitable chart example.
  6. Bind the example's option to the initialized ECharts instance.
  7. Adjust and test (a minimal sketch of steps 2-6 follows below).
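
A minimal page sketch of steps 2-6 (the js path, container id, and the data values are placeholders; the real page would load the statistics exported to MySQL):

<!-- step 2: include the downloaded echarts js -->
<script src="js/echarts.min.js"></script>
<!-- step 3: container div with an id and explicit width/height -->
<div id="main" style="width:600px;height:400px;"></div>
<script>
  // step 4: initialize an ECharts instance on the div
  var myChart = echarts.init(document.getElementById('main'));
  // steps 5/6: a simple bar-chart option bound to the instance (placeholder data)
  myChart.setOption({
    xAxis: { data: ['pv', 'uv', 'vv'] },
    yAxis: {},
    series: [{ type: 'bar', data: [100, 40, 55] }]
  });
</script>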