Basic Hive SQL Operations

Creating and Dropping Databases

Official reference: the LanguageManual documentation on the Hive wiki

CREATE DATABASE

CREATE [REMOTE] (DATABASE|SCHEMA) [IF NOT EXISTS] database_name
  [COMMENT database_comment]
  [LOCATION hdfs_path]
  [MANAGEDLOCATION hdfs_path]
  [WITH DBPROPERTIES (property_name=property_value, ...)];

-- create a database
create database if not exists shopping
comment "store all basket data"
location '/hive_data/shopping.db'
with dbproperties ('purpose' = 'test');

DROP DATABASE

DROP (DATABASE|SCHEMA) [IF EXISTS] database_name [RESTRICT|CASCADE];

drop database dbname cascade; -- force-drop a non-empty database (CASCADE drops its tables first)

ALTER DATABASE

ALTER (DATABASE|SCHEMA) database_name SET DBPROPERTIES (property_name=property_value, ...);
 
ALTER (DATABASE|SCHEMA) database_name SET OWNER [USER|ROLE] user_or_role; 
  
ALTER (DATABASE|SCHEMA) database_name SET LOCATION hdfs_path; 
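
For example, against the shopping database created above (a sketch; the role name etl_admin is made up for illustration):

-- change a database property
alter database shopping set dbproperties ('purpose' = 'production');
-- transfer ownership to a role (etl_admin is illustrative)
alter database shopping set owner role etl_admin;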

Creating, Dropping, Altering, and Querying Tables

  • Column types
data_type
  : primitive_type
  | array_type
  | map_type
  | struct_type
  | union_type

primitive_type
  : TINYINT
  | SMALLINT
  | INT
  | BIGINT
  | BOOLEAN
  | FLOAT
  | DOUBLE
  | DOUBLE PRECISION -- (Note: Available in Hive 2.2.0 and later)
  | STRING
  | BINARY      -- (Note: Available in Hive 0.8.0 and later)
  | TIMESTAMP   -- (Note: Available in Hive 0.8.0 and later)
  | DECIMAL     -- (Note: Available in Hive 0.11.0 and later)
  | DECIMAL(precision, scale)  -- (Note: Available in Hive 0.13.0 and later)
  | DATE        -- (Note: Available in Hive 0.12.0 and later)
  | VARCHAR     -- (Note: Available in Hive 0.12.0 and later)
  | CHAR        -- (Note: Available in Hive 0.13.0 and later)

array_type
  : ARRAY < data_type >

map_type
  : MAP < primitive_type, data_type >

struct_type
  : STRUCT < col_name : data_type [COMMENT col_comment], ...>

union_type
   : UNIONTYPE < data_type, data_type, ... >
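
A sketch combining these complex types in one table definition (all names here are made up for illustration):

create table type_demo (
    id    int,
    tags  array<string>,
    props map<string,int>,
    addr  struct<city:string, district:string>,
    misc  uniontype<int,string>
);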
  		 
-- inspect a table's structure
desc formatted tablename;
desc extended tablename;  -- FORMATTED and EXTENDED are alternatives, not combined
  • Row format
row_format
  : DELIMITED [FIELDS TERMINATED BY char [ESCAPED BY char]]
      [COLLECTION ITEMS TERMINATED BY char]
      [MAP KEYS TERMINATED BY char]
      [LINES TERMINATED BY char]
      [NULL DEFINED AS char]
  | SERDE serde_name [WITH SERDEPROPERTIES (property_name=property_value, ...)]
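
The DELIMITED branch is exercised by the psn2 example further below; a sketch of the SERDE branch, using the built-in OpenCSVSerde:

create table csv_demo (id string, name string) -- OpenCSVSerde reads every column as string
row format serde 'org.apache.hadoop.hive.serde2.OpenCSVSerde'
with serdeproperties ("separatorChar" = ",", "quoteChar" = "\"")
stored as textfile;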
  • File storage formats
file_format
  : SEQUENCEFILE
  | TEXTFILE
  | RCFILE
  | ORC
  | PARQUET
  | AVRO
  | JSONFILE
  | INPUTFORMAT input_format_classname OUTPUTFORMAT output_format_classname
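
A minimal sketch pairing a columnar format with a matching compression property (see the TBLPROPERTIES list later in this section):

create table logs_orc (msg string)
stored as orc
tblproperties ("orc.compress"="SNAPPY");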
  • Table constraints
column_constraint_specification: -- column-level constraints
  : [ PRIMARY KEY
    |UNIQUE
    |NOT NULL
    |DEFAULT [default_value]
    |CHECK  [check_expression] ENABLE|DISABLE NOVALIDATE RELY/NORELY ]

default_value:
  : [ LITERAL|CURRENT_USER()|CURRENT_DATE()|CURRENT_TIMESTAMP()|NULL ]

constraint_specification:
  : [, PRIMARY KEY (col_name, ...) DISABLE NOVALIDATE RELY/NORELY ]
    [, CONSTRAINT constraint_name FOREIGN KEY (col_name, ...) REFERENCES table_name(col_name, ...) DISABLE NOVALIDATE ]
    [, CONSTRAINT constraint_name UNIQUE (col_name, ...) DISABLE NOVALIDATE RELY/NORELY ]
    [, CONSTRAINT constraint_name CHECK [check_expression] ENABLE|DISABLE NOVALIDATE RELY/NORELY ]
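
The column-level branches are exercised by the constraints1-3 examples later; a sketch of the table-level FOREIGN KEY branch (assumes a users table whose id column carries a primary key):

create table orders (
    order_id int,
    user_id  int,
    primary key (order_id) disable novalidate,
    constraint fk_user foreign key (user_id)
        references users(id) disable novalidate
);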

Structure of the CREATE TABLE statement

1. Creating a table explicitly
CREATE [TEMPORARY] [EXTERNAL] TABLE [IF NOT EXISTS] [db_name.]table_name
  [(col_name data_type [column_constraint_specification] [COMMENT col_comment], ...)]
  [COMMENT table_comment]
  [PARTITIONED BY (col_name data_type [COMMENT col_comment], ...)] -- partition columns must not be repeated in the column list above
  [CLUSTERED BY (col_name, col_name, ...) [SORTED BY (col_name [ASC|DESC], ...)] INTO num_buckets BUCKETS]
  [SKEWED BY (col_name, col_name, ...) -- skewed data
     ON ((col_value, col_value, ...), (col_value, col_value, ...), ...)
     [STORED AS DIRECTORIES]]
  [
   [ROW FORMAT row_format] -- how fields are delimited
   [STORED AS file_format] -- storage format; defaults to TEXTFILE
     | STORED BY 'storage.handler.class.name' [WITH SERDEPROPERTIES (...)]
  ]
  [LOCATION hdfs_path]  -- HDFS path for the table's data
  [TBLPROPERTIES (property_name=property_value, ...)]   -- table properties
  [AS select_statement];   -- create table as select; not supported for external tables

2. Creating a table by copying an existing table's schema
CREATE [TEMPORARY] [EXTERNAL] TABLE [IF NOT EXISTS] [db_name.]table_name
  LIKE existing_table_or_view_name
  [LOCATION hdfs_path];
  
3. Creating a table from a query (CTAS) -- the target cannot be an external table or a bucketed table
  CREATE TABLE new_key_value_store
  ROW FORMAT SERDE "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe"
  STORED AS RCFile
  AS
  SELECT (KEY % 1024) new_key,
         concat(KEY, VALUE) key_value_pair
  FROM key_value_store
  SORT BY new_key, key_value_pair;
   
-- inspect a table's structure
   DESCRIBE EXTENDED table_name;
-- common table properties
   TBLPROPERTIES ("comment"="table_comment")
   TBLPROPERTIES ("hbase.table.name"="table_name") -- HBase integration
   TBLPROPERTIES ("immutable"="true") or ("immutable"="false")
   TBLPROPERTIES ("skip.header.line.count"="2") -- skip the first two lines of each data file
   TBLPROPERTIES ("orc.compress"="ZLIB") or ("orc.compress"="SNAPPY") or ("orc.compress"="NONE")
   TBLPROPERTIES ("transactional"="true") or ("transactional"="false") -- transaction support
   TBLPROPERTIES ("NO_AUTO_COMPACTION"="true") or ("NO_AUTO_COMPACTION"="false") -- transaction-related
   TBLPROPERTIES ("compactor.mapreduce.map.memory.mb"="mapper_memory") -- transaction-related
   TBLPROPERTIES ("compactorthreshold.hive.compactor.delta.num.threshold"="threshold_num")
   TBLPROPERTIES ("compactorthreshold.hive.compactor.delta.pct.threshold"="threshold_pct")
   TBLPROPERTIES ("auto.purge"="true") or ("auto.purge"="false")
   TBLPROPERTIES ("EXTERNAL"="TRUE")
   TBLPROPERTIES ("external.table.purge"="true")

Table-related SQL statements

SHOW TABLES;
SHOW TABLES 'page.*';
SHOW PARTITIONS page_view;

DESCRIBE page_view;
DESCRIBE EXTENDED page_view;
DESCRIBE EXTENDED page_view PARTITION (ds='2008-08-08');

-- alter a table
ALTER TABLE old_table_name RENAME TO new_table_name;
ALTER TABLE old_table_name REPLACE COLUMNS (col1 TYPE, ...);
ALTER TABLE tab1 ADD COLUMNS (c1 INT COMMENT 'a new int column', c2 STRING DEFAULT 'def val');

-- drop a partition (and its data)
ALTER TABLE pv_users DROP PARTITION (ds='2008-08-08');

-- truncate a table
truncate table psn22;

-- drop a table
DROP TABLE pv_users;

Table creation examples

Create a plain (managed) table
create table psn(id int,
    name string,
    likes array<string>,
    address map<string,string>
);
-- stored under the default database's warehouse directory
Create tables with column constraints

create table constraints1(id1 integer UNIQUE disable novalidate, id2 integer NOT NULL,
  usr string DEFAULT current_user(), price double CHECK (price > 0 AND price <= 1000));
 
create table constraints2(id1 integer, id2 integer,
  constraint c1_unique UNIQUE(id1) disable novalidate);
 
create table constraints3(id1 integer, id2 integer,
  constraint c1_check CHECK(id1 + id2 > 0));

Create a Hive table with CREATE TABLE ... LIKE
CREATE EXTERNAL TABLE `TO_SONG_INFO_D` 
like TO_SONG_INFO_D_TMP
STORED AS orc
LOCATION 'hdfs://mycluster/project/musicproject.db/ods/song/TO_SONG_INFO_D'
TBLPROPERTIES('orc.compress'='snappy','orc.create.index'='true');


-- temporary table
CREATE TEMPORARY TABLE `TO_SONG_INFO_D_TMP`(
 `NBR` string comment 'song number',
 `NAME` string comment 'song name',
 `OTHER_NAME` string comment 'alternate song name',
 `SOURCE` int comment 'source',
 `ALBUM` string comment 'album',
 `PRDCT` string comment 'record company',
 `LANG` string comment 'language',
 `VIDEO_FORMAT` string comment 'video style',
 `DUR` int comment 'duration in seconds',
 `SINGER_INFO` string comment 'singer info',
 `POST_TIME` string comment 'release time',
 `PINYIN_FST` string COMMENT 'pinyin initials of the song title',
 `PINYIN` string COMMENT 'full pinyin of the song title',
 `SING_TYPE` int  COMMENT 'performance type',
 `ORI_SINGER` string  COMMENT 'original singer',
 `LYRICIST` string  COMMENT 'lyricist',
 `COMPOSER` string  COMMENT 'composer',
 `BPM_VAL` int  COMMENT 'BPM value',
 `STAR_LEVEL` int  COMMENT 'star rating',
 `VIDEO_QLTY` int  COMMENT 'video quality',
 `VIDEO_MK` int  COMMENT 'video production method',
 `VIDEO_FTUR` int  COMMENT 'video frame characteristics',
 `LYRIC_FTUR` int  COMMENT 'lyric lettering characteristics',
 `IMG_QLTY` int COMMENT 'picture quality rating',
 `SUBTITLES_TYPE` int  COMMENT 'subtitle type',
 `AUDIO_FMT` int COMMENT 'audio format',
 `ORI_SOUND_QLTY` int COMMENT 'original vocal sound quality',
 `ORI_TRK` int  COMMENT 'original vocal track',
 `ORI_TRK_VOL` int COMMENT 'original vocal volume',
 `ACC_VER` int  COMMENT 'accompaniment version',
 `ACC_QLTY` int  COMMENT 'accompaniment sound quality',
 `ACC_TRK_VOL` int COMMENT 'accompaniment volume',
 `ACC_TRK` int  COMMENT 'accompaniment track',
 `WIDTH` int COMMENT 'video resolution width',
 `HEIGHT` int  COMMENT 'video resolution height',
 `VIDEO_RSVL` int  COMMENT 'video resolution',
 `SONG_VER` int  COMMENT 'arrangement version',
 `AUTH_CO` string  COMMENT 'licensing company',
 `STATE` int  COMMENT 'status',
 `PRDCT_TYPE` string  COMMENT 'product type'
)
COMMENT 'song and singer information table'
ROW FORMAT DELIMITED  FIELDS TERMINATED BY '\t'
LOCATION 'hdfs://mycluster/project/musicproject.db/ods/song/TO_SONG_INFO_D_TMP';
Create a Hive table with a custom row format
create table psn2(
    id int,
    name string,
    likes array<string>,
    address map<string,string>
) 
row format delimited 
    fields terminated by ',' 
    collection items terminated by '-' 
    map keys terminated by ':';
    
cat >> hive_data.txt <<-EOF
1,小明1,抽烟-喝酒-烫头,上海:静安
EOF

Upload the file to the table's directory under the default database:
hdfs dfs -put hive_data.txt /user/hive_remote/warehouse/psn2/

Query it in Hive:
select * from psn2;
1       小明1   ["抽烟","喝酒","烫头"]  {"上海":"静安"}
Create a Hive table using the default delimiters (^A, ^B, ^C)
create table psn3
(
    id int,
    name string,
    likes array<string>,
    address map<string,string>
)
row format delimited
    fields terminated by '\001'
    collection items terminated by '\002'
    map keys terminated by '\003';
-- or equivalently (these are the defaults)
create table psn3
(
    id int,
    name string,
    likes array<string>,
    address map<string,string>
);
Create a Hive external table (requires the EXTERNAL and LOCATION keywords)
-- hdfs dfs -mkdir /data/hive/
create external table psn4
(
    id int,
    name string,
    likes array<string>,
    address map<string,string>
)
row format delimited
    fields terminated by ','
    collection items terminated by '-'
    map keys terminated by ':'
location '/data/hive';


-- load local data into the Hive table
load data local inpath '/tmp/hive_data.txt' into table psn4;

-- load an HDFS data file into the Hive table
load data inpath '/data/data' overwrite into table psn4; -- (/data/data is an HDFS path)
Managed tables vs. external tables
Differences between managed (internal) and external tables:
	1. A managed table is stored under Hive's default warehouse directory; an external table's location must be specified at creation time.
	2. Dropping a managed table deletes both the metadata and the data; dropping an external table deletes only the metadata, never the data.

	Typical usage:
		Managed tables: create the table first, then load data into it; well suited to intermediate result tables.
		External tables: the table can be created before or after the data exists, since an external table essentially maps an HDFS directory onto a Hive table; well suited to raw source data, which a careless drop cannot delete. The drop-behavior difference is sketched below.
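
A sketch of that difference, using the tables defined in this section:

-- managed table: metadata AND the files under /user/hive_remote/warehouse/psn2 are removed
drop table psn2;
-- external table: only the metadata is removed; the files under /data/hive survive,
-- so re-running the CREATE EXTERNAL TABLE psn4 ... statement above restores the table
drop table psn4;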
External partitioned table
-- create an external table (EXTERNAL keyword)
create external table psn7
(
    id int,
    name string,
    likes array<string>,
    address map<string,string>
)
partitioned by(age int)
row format delimited
    fields terminated by ','
    collection items terminated by '-'
    map keys terminated by ':'
location '/chauncy';
-- query result (no rows yet)
	select * from psn7;
-- repair the partition metadata
	msck repair table psn7;
-- query result (rows now visible)
	select * from psn7;
Creating partitioned tables
/*
	Hive partitioned tables:
		By default Hive keeps a table's data under a single HDFS directory, so retrieving a subset of rows requires a full scan, which is IO-heavy and slow. Partitioning applies divide-and-conquer: rows matching certain column values get their own directory, so a query only scans the matching directories instead of the whole table.
*/

/*
	Notes:
		1. After a partitioned table is created, each partition column becomes a directory level on HDFS, nested in order.
		2. When inserting into a multi-partition table, values must be supplied for ALL partition columns, not just one.
		3. When supplying partition values for a multi-partition table, order does not matter; values are matched to partition columns by name.
*/

/*
	Notes:
		1. When adding partition values, a multi-partition table requires a value for every partition column.
		2. When dropping partitions, specifying a single partition column value is enough, for single- and multi-partition tables alike.
*/




-- add a partition value to a partitioned table
	(equivalent to creating a new directory on HDFS)
	alter table table_name add partition(col_name=col_value);
-- drop a partition value
	alter table table_name drop partition(col_name=col_value);

-- show partition information
show partitions psn5;
Create a single-partition-column table
-- create a table with one partition column
create table psn5
(
    id int,
    name string,
    likes array<string>,
    address map<string,string>
)
partitioned by(gender string)
row format delimited
    fields terminated by ','
    collection items terminated by '-'
    map keys terminated by ':';

-- prepare data
cat >> /tmp/hive_data.txt <<-EOF
1,小明1,抽烟-喝酒-烫头,上海:静安
EOF

cat > /tmp/hive_data_women.txt <<-EOF
1,小娟,抽烟-喝酒-烫头,上海:静安
2,小红,--,上海:静安
3,小花,追剧-宅家-买衣服,广东:广州
EOF

-- loading method 1: upload data directly to HDFS
dfs -mkdir /user/hive_remote/warehouse/psn5/gender=man;
dfs -mkdir /user/hive_remote/warehouse/psn5/gender=women;
dfs -put /tmp/hive_data.txt /user/hive_remote/warehouse/psn5/gender=man;
dfs -put /tmp/hive_data_women.txt /user/hive_remote/warehouse/psn5/gender=women;
-- query result (no rows yet)
	select * from psn5;
-- the metastore was not updated; repair the partitions
msck repair table psn5;
-- query result (rows now visible)
	select * from psn5;


-- loading method 2: LOAD DATA
-- add data to the gender='man' partition
load data local inpath '/tmp/hive_data.txt' into table psn5 partition(gender="man");

-- add data to the gender='women' partition
load data local inpath '/tmp/hive_data_women.txt' into table psn5 partition(gender="women");


-- query the data
select * from psn5 where gender='man';
select * from psn5 where gender='women';
Create a multi-partition-column table
-- create a table with two partition columns
create table psn6
(
    id int,
    name string,
    likes array<string>,
    address map<string,string>
)
partitioned by(gender string,age int)
row format delimited
    fields terminated by ','
    collection items terminated by '-'
    map keys terminated by ':';
    

-- loading method 1: upload directly to HDFS (tedious; skipped here)
-- loading method 2: LOAD DATA
-- add data to the gender='man', age=18 partition
cat >> /tmp/hive_data_man_18.txt <<-EOF
1,小明1,抽烟-喝酒-烫头,上海:静安
EOF
load data local inpath '/tmp/hive_data_man_18.txt' into table psn6 partition(gender="man",age=18);

-- add data to the gender='women', age=24 partition
cat > /tmp/hive_data_women_24.txt <<-EOF
1,小娟,抽烟-喝酒-烫头,上海:静安
2,小红,--,上海:静安
3,小花,追剧-宅家-买衣服,广东:广州
EOF
load data local inpath '/tmp/hive_data_women_24.txt' into table psn6 partition(gender="women",age=24);


-- query the data
select * from psn6 where gender='man' and age=18;
select * from psn6 where gender='women';
Dynamic partitions
Property                                  Default  Description
hive.exec.dynamic.partition               true     set to true to enable dynamic partitioning
hive.exec.dynamic.partition.mode          strict   set to nonstrict to let all partition columns be dynamic; otherwise at least one static partition value is required
hive.exec.max.dynamic.partitions.pernode  100      maximum number of dynamic partitions each MR node may create
hive.exec.max.dynamic.partitions          1000     maximum number of dynamic partitions across all MR nodes
hive.exec.max.created.files               100000   maximum number of files all MR jobs may create
-- enable dynamic partitioning
	set hive.exec.dynamic.partition=true;
	default: true
-- dynamic partition mode
	set hive.exec.dynamic.partition.mode=nonstrict;
	default: strict (at least one partition column must be static)
-- maximum number of dynamic partitions each MR node may create (100)
	set hive.exec.max.dynamic.partitions.pernode;
-- maximum number of dynamic partitions across all MR nodes (1000)
	set hive.exec.max.dynamic.partitions;
-- maximum number of files all MR jobs may create (100000)
	set hive.exec.max.created.files;

-- insert data (both partition columns dynamic)
INSERT OVERWRITE TABLE employees
PARTITION (country, state)
SELECT ..., se.cnty, se.st
FROM staged_employees se;

-- insert data; static partition values must precede dynamic ones
INSERT OVERWRITE TABLE employees
PARTITION (country = 'US', state)
SELECT ..., se.cnty, se.st
FROM staged_employees se
WHERE se.cnty = 'US';

-- example
from psn21 pp
insert [overwrite|into] table psn22 partition(age,gender)
select pp.id,pp.name,pp.likes,pp.address,pp.age,pp.gender;
  • Example
  -- prepare data
  cat > dynamic_data.txt <<-EOF
  1,小明1,11,man,抽烟-喝酒-烫头,北京:王府井-上海:浦东
  2,小明2,12,women,lol-book-movie,beijing:aaa-shanghai:jingan
  3,小明3,13,man,抽烟-喝酒-烫头,北京:王府井-上海:浦东
  4,小明4,13,women,lol-book-movie,beijing:aaa-shanghai:jingan
  5,小明5,14,man,抽烟-喝酒-烫头,北京:王府井-上海:浦东
  6,小明6,15,women,lol-book-movie,beijing:aaa-shanghai:jingan
  7,小明7,16,man,抽烟-喝酒-烫头,北京:王府井-上海:浦东
  EOF
  
  hdfs dfs -mkdir /hive_dynamic
  hdfs dfs -put dynamic_data.txt /hive_dynamic
  
  -- create a plain (non-partitioned) table
  create table psn21
  (
      id int,
      name string,
      age int,
      gender string,
      likes array<string>,
      address map<string,string>
  )
  row format delimited
      fields terminated by ','
      collection items terminated by '-'
      map keys terminated by ':';
  
  -- load the data into the plain table
  load data inpath '/hive_dynamic/dynamic_data.txt' overwrite into table psn21;
  
  -- create the dynamically partitioned table
  create table psn22
  (
  id int,
  name string,
  likes array<string>,
  address map<string,string>
  )
  partitioned by (age int,gender string)
  row format delimited
  fields terminated by ','
  collection items terminated by '-'
  map keys terminated by ':';
  
  -- load into the partitioned table; matching partitions are created automatically
  set hive.exec.dynamic.partition.mode=nonstrict;
  -- the selected column order must match the table definition, with partition columns last, in partition-spec order
  insert into table psn22 
  	partition(age,gender) 
  	select id,name,likes,address,age,gender from psn21;
  
  -- equivalent multi-insert form
  from psn21 pp
  insert overwrite table psn22 partition(age,gender)
  select pp.id,pp.name,pp.likes,pp.address,pp.age,pp.gender;
  
  -- append (INTO) instead of overwrite
  from psn21 pp
  insert into table psn22 partition(age,gender)
  select pp.id,pp.name,pp.likes,pp.address,pp.age,pp.gender;
  
  -- truncate the table
  truncate table psn22;
  
  -- drop partitions (and their data)
  alter table psn22 drop partition(gender='man');
  alter table psn22 drop partition(age=13);
  
  
  
-- ------------------------------------
Strict mode (at least one static partition value required)
set hive.exec.dynamic.partition.mode=strict;

create table dptest
(
id int,
name string,
likes array<string>,
address map<string,string>
)
partitioned by (age int, sex string)
row format delimited
fields terminated by ','
collection items terminated by '-'
map keys terminated by ':';


insert into table dptest partition(age=18,sex) select id,name,likes,address,/*age,*/ gender from psn21;

Creating bucketed tables
CREATE TABLE page_view(viewTime INT, userid BIGINT,
     page_url STRING, referrer_url STRING,
     ip STRING COMMENT 'IP Address of the User')
 COMMENT 'This is the page view table'
 PARTITIONED BY(dt STRING, country STRING)
 CLUSTERED BY(userid) SORTED BY(viewTime) INTO 32 BUCKETS
 ROW FORMAT DELIMITED
   FIELDS TERMINATED BY '\001'
   COLLECTION ITEMS TERMINATED BY '\002'
   MAP KEYS TERMINATED BY '\003'
 STORED AS SEQUENCEFILE;
 
 
-- example
-- create a bucketed table
CREATE TABLE user_info_bucketed(user_id BIGINT, firstname STRING, lastname STRING)
COMMENT 'A bucketed copy of user_info'
PARTITIONED BY(ds STRING)
CLUSTERED BY(user_id) INTO 256 BUCKETS;

-- insert data
FROM user_info
INSERT OVERWRITE TABLE user_info_bucketed
PARTITION (ds='2009-02-25')
SELECT userid, firstname, lastname WHERE ds='2009-02-25';
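
One caveat worth noting: on Hive 0.x and 1.x, bucketed inserts also needed bucketing enforcement turned on (a sketch; not needed in Hive 2.x and later):

set hive.enforce.bucketing = true;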
Creating tables with constraints
create table constraints1(id1 integer UNIQUE disable novalidate, id2 integer NOT NULL,
  usr string DEFAULT current_user(), price double CHECK (price > 0 AND price <= 1000));
  
insert into table constraints1(id1,id2,price) values(1,24,999);

insert into table constraints1(id1,id2,price) values(2,24,1999); -- fails: 1999 violates the CHECK constraint (price <= 1000)

Altering table properties

ALTER TABLE table_name RENAME TO new_table_name; -- rename a table
ALTER TABLE table_name SET TBLPROPERTIES ("name"="chauncy"); -- modify/add a table property
ALTER TABLE table_name SET TBLPROPERTIES ('comment' = new_comment); -- change the table comment
ALTER TABLE table_name CLUSTERED BY (col_name, col_name, ...) [SORTED BY (col_name, ...)]
  INTO num_buckets BUCKETS; -- change storage (bucketing) properties
alter table haha unset tblproperties ("name"); -- remove a property

ALTER TABLE table_name SKEWED BY (col_name1, col_name2, ...)
  ON ([(col_name1_value, col_name2_value, ...) [, (col_name1_value, col_name2_value), ...]
  [STORED AS DIRECTORIES]; -- change table skew

ALTER TABLE table_name NOT SKEWED;  -- remove table skew

ALTER TABLE table_name ADD [IF NOT EXISTS] PARTITION partition_spec [LOCATION 'location'][, PARTITION partition_spec [LOCATION 'location'], ...];

-- change columns
ALTER TABLE table_name [PARTITION partition_spec] CHANGE [COLUMN] col_old_name col_new_name column_type
  [COMMENT col_comment] [FIRST|AFTER column_name] [CASCADE|RESTRICT];

CREATE TABLE test_change (a int, b int, c int);
ALTER TABLE test_change CHANGE a a1 INT; -- rename column a to a1
ALTER TABLE test_change CHANGE a1 a2 STRING AFTER b; -- rename, retype, and reposition a column
ALTER TABLE test_change CHANGE c c1 INT FIRST;
ALTER TABLE test_change CHANGE a1 a1 INT COMMENT 'this is column a1'; -- add a column comment
alter table processdb.locationgeospacesatellite0001 clustered by (locationid) into 1 buckets; -- rebucket

Dropping and truncating tables

drop table tablename;

truncate table tablename;
TRUNCATE [TABLE] table_name PARTITION (dt="2021-10-15");

Adding data

1. LOAD DATA

-- syntax
LOAD DATA [LOCAL] INPATH 'filepath' [OVERWRITE] INTO TABLE table_name
	[PARTITION(date='2008-06-08', country='US')]  -- only static partition values allowed

-- example
load data inpath 'hdfs://mycluster/project/data_music/all_client_tables/20211214/MINIK_CLIENT_SONG_PLAY_OPERATE_REQ'
into table TO_CLIENT_SONG_PLAY_OPERATE_REQ_D
partition (data_dt='20211214');


-- dynamic partition insert
FROM page_view_stg pvs
INSERT OVERWRITE TABLE page_view PARTITION(dt='2008-06-08', country)
       SELECT pvs.viewTime, pvs.userid, pvs.page_url, pvs.referrer_url, null, null, pvs.ip, pvs.country;

-- plain table
-- load local data into a Hive table
	load data local inpath '/root/data/data' into table psn; -- (/root/data/data is a local Linux path)
-- load an HDFS data file into a Hive table
	load data inpath '/data/data' into table psn; -- (/data/data is an HDFS path)

-- partitioned table
load data local inpath '/tmp/hive_data_women_24.txt' into table psn6 partition(gender="women",age=24);

/*
	Notes:
		1. LOAD does not transform or modify the data in any way.
		2. Loading from the local Linux filesystem copies the file.
		3. Loading from HDFS moves the file.
		4. LOAD also works for partitioned tables; the partition column values must be supplied:
		load data local inpath '/home/god/hive_data.txt' into table psn6 partition(gender='man',age=18);
*/

2. Writing query results to HDFS or the local filesystem

-- single query
INSERT OVERWRITE [LOCAL] DIRECTORY directory1
  [ROW FORMAT row_format] [STORED AS file_format]
  SELECT ... FROM ...

-- multiple queries
FROM from_statement
  INSERT OVERWRITE [LOCAL] DIRECTORY directory1 select_statement1
  [INSERT OVERWRITE [LOCAL] DIRECTORY directory2 select_statement2] ...

row_format
  : DELIMITED [FIELDS TERMINATED BY char [ESCAPED BY char]] [COLLECTION ITEMS TERMINATED BY char]
    [MAP KEYS TERMINATED BY char] [LINES TERMINATED BY char]
    [NULL DEFINED AS char]

-- WARNING: never point the directory at the root path; INSERT OVERWRITE replaces everything under it
-- write query results to HDFS
	insert overwrite directory '/result' select * from psn;
-- write query results to the local filesystem
	insert overwrite local directory '/result' select * from psn;

3. INSERT ... SELECT

-- syntax: insert rows produced by a query (relatively slow)
INSERT [OVERWRITE|INTO] TABLE tablename1 [PARTITION (partcol1=val1, partcol2=val2 ...) [IF NOT EXISTS]] select_statement1 FROM from_statement;

-- example
INSERT OVERWRITE TABLE psn6 PARTITION(gender='man',age=18) SELECT id,name,likes,address FROM psn21 WHERE gender='man' AND age=18;

insert into psn5
    partition (gender='women')
    select 5,'小美2',array("追剧2","宅家2","买衣服2"),map("广东","广州");


-- multi-table insert
FROM from_statement
    INSERT INTO TABLE tablename1 [PARTITION (partcol1=val1, partcol2=val2 ...)] select_statement1
    [INSERT INTO TABLE tablename2 [PARTITION ...] select_statement2]
    [INSERT OVERWRITE TABLE tablename2 [PARTITION ... [IF NOT EXISTS]] select_statement2] ...;

-- example
from psn
    insert overwrite table psn9 select id,name
    insert into table psn10 select id;

4. INSERT ... VALUES (slow)

-- syntax
INSERT INTO TABLE tablename
    [PARTITION (partcol1[=val1], partcol2[=val2] ...)]
    VALUES values_row [, values_row ...];

-- examples
-- VALUES cannot supply complex-type literals (array/map), and psn5 has array and map columns,
-- so a plain VALUES insert such as the following would fail on psn5:
-- insert into psn5 values(1,'zhangsan');
-- instead, use INSERT ... SELECT with the array()/map()/str_to_map() constructors:

insert into psn5
    partition (gender='women')
    select 4,'小美',array("追剧","宅家","买衣服"),str_to_map("广东:广州");

insert into psn5
    partition (gender='women')
    select 5,'小美2',array("追剧2","宅家2","买衣服2"),map("广东","广州");

-- insert into multiple partitions from one scan
FROM staged_employees se
INSERT OVERWRITE TABLE employees
PARTITION (country = '中国', state = '河北省')
SELECT * WHERE se.cnty = '中国' AND se.st = '河北省'
INSERT OVERWRITE TABLE employees
PARTITION (country = '中国', state = '陕西省')
SELECT * WHERE se.cnty = '中国' AND se.st = '陕西省'
INSERT OVERWRITE TABLE employees
PARTITION (country = '中国', state = '河南省')
SELECT * WHERE se.cnty = '中国' AND se.st = '河南省';

5. Uploading files directly to HDFS

cat > hive_data_hdfs.txt <<-EOF
10,小娟,抽烟-喝酒-烫头,上海:静安
20,小红,--,上海:静安
30,小花,追剧-宅家-买衣服,广东:广州
EOF

hdfs dfs -put hive_data_hdfs.txt /user/hive_remote/warehouse/psn5/gender=women/
-- repair the partition metadata
msck repair table psn5;

-- view the data
select * from psn5;

Updates and Deletes

Overview

The official documentation states that Hive supports UPDATE and DELETE, but in practice they require transaction support, and Hive places many restrictions on transactions.

Add the following configuration to hive-site.xml:
	<property>
		<name>hive.support.concurrency</name>
		<value>true</value>
	</property>
	<property>
		<name>hive.enforce.bucketing</name>
		<value>true</value>
	</property>
	<property>
		<name>hive.exec.dynamic.partition.mode</name>
		<value>nonstrict</value>
	</property>
	<property>
		<name>hive.txn.manager</name>
		<value>org.apache.hadoop.hive.ql.lockmgr.DbTxnManager</value>
	</property>
	<property>
		<name>hive.compactor.initiator.on</name>
		<value>true</value>
	</property>
	<property>
		<name>hive.compactor.worker.threads</name>
		<value>1</value>
	</property>

    -- example statements
create table test_transaction (user_id Int,name String) clustered by (user_id) into 3 buckets stored as orc TBLPROPERTIES ('transactional'='true');

insert into table test_transaction values(1,'alice'),(2,'tom'),(3,'link'),(4,'sarry'),(5,'homery'),(6,'banala'),(7,'cukky');
update test_transaction set name='jerrick_up' where user_id=1;
-- the source data file
	1,alice
	2,tom
	3,link
	4,sarry
	5,homery
	6,banala
	7,cukky
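
The heading promises deletes as well; a sketch against the same transactional table:

delete from test_transaction where user_id=7;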

Querying Data

SELECT syntax

SELECT [ALL | DISTINCT] select_expr, select_expr, ...
  FROM table_reference
  [WHERE where_condition]
  [GROUP BY col_list]
  [ORDER BY col_list]
  [CLUSTER BY col_list  -- distribute + sort
    | [DISTRIBUTE BY col_list] [SORT BY col_list]
  ]
 [LIMIT [offset,] rows]

-- regular-expression column selection (all columns except ds and hr)
set hive.support.quoted.identifiers=none;
SELECT `(ds|hr)?+.+` FROM sales;

Simple queries

SELECT COUNT(*) FROM table2;


-- multi-insert: one scan feeding multiple outputs
FROM pv_users
INSERT OVERWRITE TABLE pv_gender_sum
  SELECT pv_users.gender, count(DISTINCT pv_users.userid)
  GROUP BY pv_users.gender
INSERT OVERWRITE DIRECTORY '/user/facebook/tmp/pv_age_sum'
  SELECT pv_users.age, count(DISTINCT pv_users.userid)
  GROUP BY pv_users.age;
  
  
-- grouping
select pp.name username,
       pp.age,
       count(1) nums 
from psn22 pp 
       group by pp.name,pp.age;
      

Sorting

Sort By -- ordered within each reducer (per-partition order)
Order By -- globally ordered final result

-- ORDER BY: total ordering of the result
-- In Hive 3.0.0 and later, ORDER BY without LIMIT in subqueries and views will be removed by the optimizer. To disable this, set hive.remove.orderby.in.subquery to false.
Performs one global sort over the entire query result.
In strict mode (hive.mapred.mode=strict) the output must be bounded with LIMIT, e.g.:
limit 10
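
A sketch using the psn21 table defined earlier:

select * from psn21 order by age desc limit 10; -- one global sort; LIMIT satisfies strict mode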


-- SORT BY: sorts within each reducer (per-partition order)
Sorting completes before the reduce output is written; only each reducer's own output is guaranteed to be ordered.
Set mapred.reduce.tasks=n to choose the reducer count, then merge-sort the per-reducer outputs to obtain a total order.
Different reducers may receive identical rows; SORT BY alone does not route identical rows to the same reducer.
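
A sketch of per-reducer ordering, again on psn21:

set mapred.reduce.tasks=3;
select * from psn21 sort by age desc; -- each of the 3 reducer outputs is ordered; the concatenated result is not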


-- DISTRIBUTE BY: partitioned distribution
distribute by controls how map output rows are split across reducers
hive routes rows to reducers based on the distribute by columns, hashing by default
sort by produces one sorted file per reducer; to control which reducer a particular row goes to, use distribute by
distribute by is frequently combined with sort by

select * from temperature distribute by year sort by year asc, temper desc;


-- cluster by
cluster by combines the behavior of distribute by with that of sort by, but the sort can only be ascending (no ASC/DESC may be specified)
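
A sketch of the equivalence, reusing the temperature table from the distribute by example:

select * from temperature cluster by year;
-- equivalent to:
select * from temperature distribute by year sort by year;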