Hive--zeppelin安装及数据清洗项目

一:zeppelin安装

1.上传zeppelin并解压到soft目录文件夹,更名为zeppelin010

2.添加全局变量,并source

#ZEPPELIN
export ZEPPELIN_HOME=/opt/soft/zeppelin010
export PATH=$PATH:$ZEPPELIN_HOME/bin

3.进入/opt/soft/zeppelin010/conf

3.1复制zeppelin-site.xml.template为zeppelin-site.xml,修改zeppelin-site.xml

添加

<property>
  <name>zeppelin.server.addr</name>
  <value>192.168.10.129</value>
  <description>Server binding address</description>
</property>

<property>
  <name>zeppelin.server.port</name>
  <value>8000</value>
  <description>Server port.</description>
</property>
3.2复制zeppelin-env.sh.template为zeppelin-env.sh,修改zeppelin-env.sh

添加

export JAVA_HOME=/opt/soft/jdk180
export HADOOP_CONF_DIR=/opt/soft/hadoop313/etc/hadoop
3.3拷贝hive文件里的hive-site.xml到当前目录
cp /opt/soft/hive312/conf/hive-site.xml /opt/soft/zeppelin010/conf

4.进入/opt/soft/zeppelin010/interpreter/jdbc文件夹

4.1添加如下jar包
# Jars the Zeppelin JDBC interpreter needs on its classpath to reach HiveServer2.
cp /opt/soft/hadoop313/share/hadoop/common/hadoop-common-3.1.3.jar ./
# NOTE(review): the lines below copy from /opt/soft/hive/lib while step 3.3
# uses /opt/soft/hive312 — confirm /opt/soft/hive exists (e.g. as a symlink),
# otherwise these paths should read /opt/soft/hive312/lib.
cp /opt/soft/hive/lib/curator-client-2.12.0.jar ./
cp /opt/soft/hive/lib/guava-27.0-jre.jar ./
cp /opt/soft/hive/lib/hive-common-3.1.2.jar ./
cp /opt/soft/hive/lib/hive-jdbc-3.1.2.jar ./
cp /opt/soft/hive/lib/hive-serde-3.1.2.jar ./
cp /opt/soft/hive/lib/hive-service-3.1.2.jar ./
cp /opt/soft/hive/lib/hive-service-rpc-3.1.2.jar ./
cp /opt/soft/hive/lib/libthrift-0.9.3.jar ./
cp /opt/soft/hive/lib/protobuf-java-2.5.0.jar ./
4.2添加外部jar包

此时文件内有如下jar包

5.启动zeppelin

zeppelin-daemon.sh start

此时浏览器输入192.168.10.129:8000即可进入zeppelin中

二:数据导入及数据清洗(zeppelin中操作)

1.前置条件:需创建好四个文件夹并导入源数据
%sh
# Create one HDFS staging directory per dataset, then upload the raw CSVs.
for area in customer store review transaction; do
  hdfs dfs -mkdir -p /shopping/data/$area
done
# Sanity check: the four directories should be listed.
hdfs dfs -ls /shopping/data
hdfs dfs -put /opt/kb23/shoppingproject/customer_details.csv    /shopping/data/customer
hdfs dfs -put /opt/kb23/shoppingproject/store_details.csv       /shopping/data/store
hdfs dfs -put /opt/kb23/shoppingproject/store_review.csv        /shopping/data/review
hdfs dfs -put /opt/kb23/shoppingproject/transaction_details.csv /shopping/data/transaction
2.创建数据库,创建表
%hive
-- One-time setup: project database.
create database shopping;
use shopping;

-- External table over the raw customer CSV.
-- OpenCSVSerde reads every column as string; the header row is skipped.
create external table if not exists ext_customer_details (
    customer_id string,
    first_name  string,
    last_name   string,
    email       string,
    gender      string,
    address     string,
    country     string,
    language    string,
    job         string,
    credit_type string,
    credit_no   string
)
row format serde 'org.apache.hadoop.hive.serde2.OpenCSVSerde'
location '/shopping/data/customer'
tblproperties ("skip.header.line.count" = "1");

-- External table over the raw transaction CSV.
create external table if not exists ext_transaction_details (
    transaction_id string,
    customer_id    string,
    store_id       string,
    price          decimal(8,2),
    product        string,
    purchase_date  string,
    purchase_time  string
)
row format serde 'org.apache.hadoop.hive.serde2.OpenCSVSerde'
-- fix: was '/shopping/data/transactionr' (typo); the CSV is uploaded to
-- /shopping/data/transaction, so the table would otherwise be empty.
location '/shopping/data/transaction'
tblproperties ("skip.header.line.count" = "1");

-- External table over the raw store CSV.
create external table if not exists ext_store_details (
    store_id        string,
    store_name      string,
    employee_number int
)
row format serde 'org.apache.hadoop.hive.serde2.OpenCSVSerde'
location '/shopping/data/store'
tblproperties ("skip.header.line.count" = "1");

-- External table over the raw review CSV.
create external table if not exists ext_store_review (
    transaction_id string,
    store_id       string,
    review_score   int
)
row format serde 'org.apache.hadoop.hive.serde2.OpenCSVSerde'
location '/shopping/data/review'
tblproperties ("skip.header.line.count" = "1");
3.数据清洗

要求

3.1过滤掉store_review中没有评分的数据
use shopping;
-- Keep only reviews that actually carry a score
-- (OpenCSVSerde yields '' for a missing field).
create view if not exists vw_store_review as
select
    transaction_id,
    store_id,
    review_score
from ext_store_review
where review_score <> '';
3.2找出PII (personal information identification) 或PCI (personal confidential information) 数据进行加密或hash
use shopping;
-- PII/PCI masking view over ext_customer_details.
-- fix: the view was created as vw_customer_details1, but every analysis
-- query in part 三 reads vw_customer_details — name aligned so they run.
-- NOTE(review): unbase64() DECODES its input; this only obscures the data
-- if the source columns are base64-encoded in the CSV — confirm, otherwise
-- a one-way hash such as sha2(col, 256) would be the proper masking.
create view if not exists vw_customer_details as
select
    customer_id,
    first_name,
    last_name,
    unbase64(last_name) as lastname,
    unbase64(email)     as email,
    gender,
    unbase64(address)   as address,
    country,
    language,
    job,
    credit_type,
    -- 'kb23' salt mixed in before the second decode
    unbase64(concat(unbase64(credit_no), 'kb23')) as credit_no
from ext_customer_details;
3.3对transaction_details中的重复数据生成新ID;重新组织transaction数据按照日期YYYY-MM做分区
use shopping;
drop table if exists transaction_details;
-- Cleaned copy of the raw transactions, partitioned by month (YYYY-MM).
create table if not exists transaction_details (
    transaction_id string,
    customer_id    string,
    store_id       string,
    price          decimal(8,2),
    product        string,
    purchase_date  string,
    -- fix: was declared DATE, but the source column holds clock times such
    -- as '10:05 PM' / '22:41' (see the cleaning in analysis query 6) — a
    -- DATE column would load every value as NULL.
    purchase_time  string
)
partitioned by (purchase_month string);

set hive.exec.dynamic.partition=true;
set hive.exec.dynamic.partition.mode=nonstrict;

-- Deduplicate transaction ids and load the month-partitioned table.
with numbered as (
    select
        transaction_id,
        customer_id,
        store_id,
        price,
        product,
        purchase_date,
        purchase_time,
        from_unixtime(unix_timestamp(purchase_date, 'yyyy-MM-dd'), 'yyyy-MM') as purchase_month,
        row_number() over (partition by transaction_id order by store_id) as rn
    from ext_transaction_details
)
insert overwrite table transaction_details partition (purchase_month)
select
    -- the first occurrence keeps its id; duplicates get a '_fix_<n>' suffix
    if(rn = 1, transaction_id, concat(transaction_id, '_fix_', rn)) as transaction_id,
    customer_id,
    store_id,
    price,
    product,
    purchase_date,
    purchase_time,
    purchase_month
from numbered;

三:数据分析

(一):customer分析

1.找出顾客最常用的信用卡

-- Most frequently used credit card types.
select
    credit_type,
    count(1) as cc
from vw_customer_details
group by credit_type
order by cc desc
limit 10;

2.找出客户资料中排名前五的职位名称

-- Five most common job titles among customers.
select
    job,
    count(customer_id) as cc
from vw_customer_details
group by job
order by cc desc
limit 5;

3.在美国女性最常用的信用卡

-- Credit card types most used by female customers in the United States.
select
    credit_type,
    count(1) as co
from vw_customer_details
where country = 'United States'
  and gender = 'Female'
group by credit_type
order by co desc
limit 5;

4.按性别和国家进行客户统计

-- Customer head-count per country and gender.
select
    country,
    gender,
    count(1)
from vw_customer_details
group by country, gender;

(二)transaction分析

1.计算每月总收入

-- Total revenue per month (partition column doubles as the label).
select
    purchase_month,
    sum(price)
from transaction_details
group by purchase_month;

2.计算每个季度的总收入

-- Total revenue per quarter.
-- fix: quarter label now uses '-' as separator, matching the label built
-- in query 5 below (was '_'); statement terminated with ';'.
with quarters as (
    select
        concat_ws('-',
            cast(year(purchase_date) as string),
            cast(ceil(month(purchase_date) / 3) as string)) as quarter,
        price
    from transaction_details
)
select quarter, sum(price)
from quarters
group by quarter;

 

3.按年计算总收入

-- Total revenue per year (purchase_date is 'yyyy-MM-dd', so the first
-- four characters are the year).
with yearly as (
    select
        substr(purchase_date, 1, 4) as py,
        price
    from transaction_details
)
select py, sum(price)
from yearly
group by py;

 

4.按工作日计算总收入

-- Total revenue on working days.
-- fix: the weekday condition is a plain row predicate, so it belongs in
-- WHERE (filters before grouping), not HAVING.
-- dayofweek(): 1 = Sunday ... 7 = Saturday, hence 2..6 = Mon-Fri.
with weekdays as (
    select
        dayofweek(purchase_date) as wod,
        price
    from transaction_details
    where dayofweek(purchase_date) between 2 and 6
)
select wod, sum(price)
from weekdays
group by wod;

 

5.按照工作日、月、季度、年计算总收入

 

-- Revenue broken down simultaneously by weekday, month, quarter and year.
with calendar_parts as (
    select
        price,
        dayofweek(purchase_date) as weekday,
        month(purchase_date)     as month,
        concat_ws('-',
            cast(year(purchase_date) as string),
            cast(ceil(month(purchase_date) / 3) as string)) as quarter,
        year(purchase_date)      as year
    from transaction_details
)
select
    sum(price) as sumMoney,
    weekday,
    month,
    quarter,
    year
from calendar_parts
group by weekday, month, quarter, year;

6.按时间段计算总收入,平均收入(需要清理数据)

观察purchase_time,有些时间是二十四小时制,有些是十二小时制,需要将其统一

-- Revenue and average spend per time-of-day bucket.
-- buckets: early morning 5:00-8:00, morning 8:00-11:00, noon 11:00-13:00,
--          afternoon 13:00-18:00, evening 18:00-22:00, night 22:00-5:00
with normalized as (
    -- Unify the mixed formats: 12-hour values ('10:05 PM') are rewritten
    -- to 24-hour 'HH:mm'.
    -- fix: the output pattern was 'HH:MM' — in SimpleDateFormat 'MM' is
    -- the MONTH field, which corrupted the minutes; minutes are 'mm'.
    select
        price,
        purchase_time,
        if(purchase_time like '%M',
           from_unixtime(unix_timestamp(purchase_time, 'hh:mm aa'), 'HH:mm'),
           purchase_time) as time_format
    from transaction_details
),
in_hours as (
    -- hour-of-day as a decimal, e.g. '13:30' -> 13.5
    select
        price,
        purchase_time,
        time_format,
        cast(split(time_format, ':')[0] as decimal(4,2))
          + cast(split(time_format, ':')[1] as decimal(4,2)) / 60 as purchase_time_in_hr
    from normalized
),
bucketed as (
    select
        price,
        case
            when purchase_time_in_hr > 5  and purchase_time_in_hr <= 8  then 'early morning'
            when purchase_time_in_hr > 8  and purchase_time_in_hr <= 11 then 'morning'
            when purchase_time_in_hr > 11 and purchase_time_in_hr <= 13 then 'noon'
            when purchase_time_in_hr > 13 and purchase_time_in_hr <= 18 then 'afternoon'
            when purchase_time_in_hr > 18 and purchase_time_in_hr <= 22 then 'evening'
            -- NOTE(review): 5:00 sharp falls into 'night'; spec boundary is ambiguous
            else 'night'
        end as time_bucket
    from in_hours
)
select
    time_bucket,
    sum(price) / 1000 as sumprice,  -- revenue in thousands
    avg(price)        as avgprice
from bucketed
group by time_bucket;

 

 7.按工作日计算平均消费

-- Average spend per working day.
-- fix: moved the weekday filter from HAVING to WHERE — it is a row
-- predicate, not a group predicate (dayofweek: 1 = Sun ... 7 = Sat).
with weekdays as (
    select
        dayofweek(purchase_date) as dow,
        price
    from transaction_details
    where dayofweek(purchase_date) between 2 and 6
)
select dow, avg(price)
from weekdays
group by dow;

8.计算年、月、日的交易总数 

-- Transaction totals at year / month / day grain.
-- fix: the original emitted one unlabelled row PER TRANSACTION (window
-- counts without dedup); DISTINCT plus the date key columns yields one
-- labelled row per day with its year/month/day running totals.
select distinct
    year(purchase_date)  as y,
    month(purchase_date) as m,
    day(purchase_date)   as d,
    count(1) over (partition by year(purchase_date)) as year_cnt,
    count(1) over (partition by year(purchase_date), month(purchase_date)) as month_cnt,
    count(1) over (partition by year(purchase_date), month(purchase_date), day(purchase_date)) as day_cnt
from transaction_details;

 

9.找出交易量最大的10个客户

-- Ten customers with the largest number of transactions.
select
    customer_id,
    count(1) as cc
from transaction_details
group by customer_id
order by cc desc
limit 10;

 

10.找出消费最多的前10位顾客

 

-- Ten customers with the highest total spend.
select
    customer_id,
    sum(price) as cc
from transaction_details
group by customer_id
order by cc desc
limit 10;

11.统计该期间交易数量最少的用户

 

-- Customers with the fewest transactions in the period.
select
    customer_id,
    count(1) as cc
from transaction_details
group by customer_id
order by cc
limit 5;

12.计算每个季度的独立客户总数

-- Number of unique customers per quarter.
-- fix: the original deduplicated per (customer, year, quarter) and then
-- counted rows PER CUSTOMER — i.e. "active quarters per customer", not
-- "unique customers per quarter" as the question asks.
select
    year(purchase_date)    as y,
    quarter(purchase_date) as q,
    count(distinct customer_id) as unique_customers
from transaction_details
group by year(purchase_date), quarter(purchase_date);

 13.计算整个活动客户平均花费的最大值

-- Highest per-customer AVERAGE spend over the whole period.
-- fix: the original took avg(sum) while still grouped by customer_id,
-- which is just the customer's total — the avg() was a no-op. Compute the
-- per-customer average spend, then keep the largest one.
with per_customer as (
    select
        customer_id,
        avg(price) as avg_spend
    from transaction_details
    group by customer_id
)
select customer_id, avg_spend
from per_customer
order by avg_spend desc
limit 1;

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 1
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值