一、hive数据库操作
<1>创建数据库
create database if not exists db_hive_02 ;
<2>删除数据库
drop database if exists db_hive_02 ;
-- Hive comments use --, not //
drop database if exists db_hive_02 cascade; -- cascade also drops all tables inside the database
<3>使用数据库
use db_myhive_02;
<4>显示数据库
show databases; -- list every database
show databases like 'db_myhive*'; -- list databases whose name starts with db_myhive (semicolon was missing)
<5>查看数据库的描述信息
desc database db_hive_03 ; -- basic info
desc database extended db_hive_03 ; -- detailed info, including DBPROPERTIES
二、hive表的操作
<1>创建表的三种方式:
方式一:
-- NOTE(review): `user` is a reserved keyword in Hive 2.x+; backquote it so the DDL
-- parses regardless of hive.support.sql11.reserved.keywords. Column name is unchanged.
create table IF NOT EXISTS default.web_log_20171211(
ip string COMMENT 'remote ip address' ,
`user` string COMMENT '用户',
req_url string COMMENT 'user request url')
COMMENT 'Web Access Logs'
ROW FORMAT DELIMITED FIELDS TERMINATED BY ' '
STORED AS TEXTFILE ;
方式二:
create table IF NOT EXISTS default.web_log_20171211_sub
AS select ip,req_url from web_log_20171211 ; -- CTAS: copies both the schema and the data
方式三
create table IF NOT EXISTS default.web_log_20171212
like default.web_log_20171211; -- LIKE: copies only the schema, no data
<2>删除表
drop table if exists default.web_log_20171212; -- removes the table (data + metadata for a managed table)
truncate table default.web_log_20171212; -- removes the data only; the table definition remains
<3>修改表
alter table default.web_log_20171212 rename to default.web_log_20171212.bak;
<4>加载数据
LOAD DATA [LOCAL] INPATH 'filepath'
[OVERWRITE] INTO TABLE tablename [PARTITION (partcol1=val1, partcol2=val2 ...)]
注意:
*LOCAL:从本地文件加载数据到hive表;否则从HDFS加载数据到hive表;
*OVERWRITE:是否覆盖表中已有数据;
<5>外部表和内部表
*内部表也称之为MANAGED_TABLE;
*默认存储在/user/hive/warehouse下,也可以通过location指定;
*删除表时,会删除表数据以及元数据;
*外部表称之为EXTERNAL_TABLE;
*在创建表时可以自己指定目录位置(LOCATION);
*删除表时,只会删除元数据不会删除表数据;
e.g:
-- External table: dropping it deletes only the metadata; the files under LOCATION stay on HDFS.
CREATE EXTERNAL TABLE IF NOT EXISTS default.emp (
    empno    int,
    ename    string,
    job      string,
    mgr      int,
    hiredate string,
    sal      double,
    comm     double,
    deptno   int
)
ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
LOCATION '/user/xiaojiangshi/hive/warehouse/emp';
<6>分区表
1.创建分区表
-- The ** markdown emphasis that wrapped PARTITIONED BY was not valid SQL and has been removed.
-- Partition columns (month, day) live in the directory layout, not in the data files,
-- and must not repeat any of the data columns.
create EXTERNAL table IF NOT EXISTS default.emp_partition(
empno int,
ename string,
job string,
mgr int,
hiredate string,
sal double,
comm double,
deptno int
)
partitioned by (month string,day string)
ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' ;
2.向分区表中加载数据
load data local inpath '/soft/datas/emp.txt' into table default.emp_partition partition (month='201712',day='13') ;
3.查询数据
select * from emp_partition where month = '201712' and day = '13' ;
注意事项:
-- Non-partitioned managed table; files dropped straight into its warehouse
-- directory are visible to queries immediately.
CREATE TABLE IF NOT EXISTS default.dept_nopart (
    deptno int,
    dname  string,
    loc    string
)
ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t';
dfs -put /soft/datas/dept.txt /user/hive/warehouse/dept_nopart ;
select * from dept_nopart ; //有数据
-- Partitioned managed table; data put into a day=... directory is only visible
-- once the partition is registered in the metastore.
CREATE TABLE IF NOT EXISTS default.dept_part (
    deptno int,
    dname  string,
    loc    string
)
PARTITIONED BY (day string)
ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t';
第一种方式
dfs -mkdir -p /user/hive/warehouse/dept_part/day=20150913 ;
dfs -put /soft/datas/dept.txt /user/hive/warehouse/dept_part/day=20150913 ;//无数据
//(原因是未在元数据创建分区信息)
//解决方式一
msck repair table dept_part ;
//解决方式二
alter table dept_part add partition(day='20150914');
show partitions dept_part ;//显示dept_part里面的所有分区
三、hive中数据的操作
<1>加载数据
1.load data [local] inpath 'filepath' [overwrite] into table tablename [partition (partcol1=val1,…)];
注意:
* 原始文件存储的位置
* 本地 local
* hdfs
* 对表的数据是否覆盖
* 覆盖 overwrite
* 追加
* 分区表加载,特殊性
partition (partcol1=val1,…)
i)加载本地文件到hive表
load data local inpath '/soft/datas/emp.txt' into table default.emp ;
ii)加载HDFS文件到hive表
load data inpath '/user/xiaojiangshi/emp.txt' into table default.emp ;
iii)加载数据覆盖表中已有的数据
load data inpath '/user/xiaojiangshi/emp.txt' overwrite into table default.emp ;//会删除原有数据
iv)创建表时通过insert加载
create table default.emp_ci like emp ;
insert into table default.emp_ci select * from default.emp ;
v)创建表的时候通过location指定加载
-- Creating an external table over an existing LOCATION makes the files there
-- queryable immediately; no LOAD step is needed.
CREATE EXTERNAL TABLE IF NOT EXISTS default.emp (
    empno    int,
    ename    string,
    job      string,
    mgr      int,
    hiredate string,
    sal      double,
    comm     double,
    deptno   int
)
ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
LOCATION '/user/xiaojiangshi/hive/warehouse/emp';
<2>导出数据
i)insert overwrite …
insert overwrite local directory '/opt/datas/hive_exp_emp'
select * from default.emp ;
ii)修改结果文件格式
insert overwrite local directory '/opt/datas/hive_exp_emp2'
ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
select * from default.emp ;
iii)hive -e …
hive -e "select * from default.emp;" > /opt/datas/exp_res.txt # 企业用的较多
iv)导出到HDFS文件系统
insert overwrite directory '/user/xiaojiangshi/hive/hive_exp_emp'
select * from default.emp ;
v)sqoop(后续补)
功能:
hdfs/hive -> rdbms
rdbms -> hdfs/hive/hbase
四、hive中的重点
<1>order by、sort by 、distribute by、cluster by
order by
对全局数据的一个排序,仅仅只有一个reduce
select * from emp order by empno desc ;
sort by
对每一个reduce内部数据进行排序的,全局结果集来说不是排序
set mapreduce.job.reduces= 3;
select * from emp sort by empno asc ;
insert overwrite local directory '/opt/datas/sortby-res' select * from emp sort by empno asc ;
distribute by
分区partition
类似于MapReduce中分区partition,对数据进行分区,结合sort by进行使用
insert overwrite local directory ‘/opt/datas/distby-res’ select * from emp distribute by deptno sort by empno asc ;
注意事项:
distribute by 必须要在sort by 前面。
cluster by
当distribute by和sort by 字段相同时,可以使用cluster by ;
insert overwrite local directory ‘/opt/datas/cluster-res’ select * from emp cluster by empno ;
五、hive udf编程
1)First, you need to create a new class that extends UDF, with one or more methods named evaluate.
2)Usage
add jar /opt/datas/hiveudf.jar ;
create temporary function my_lower as “com.xiaojiangshi.senior.hive.udf.LowerUDF” ;
select ename, my_lower(ename) lowername from emp limit 5 ;