-- Step 1: create a Hive table mapped onto an HBase table.
use order;
CREATE TABLE tmp_member_info_new_mysql
(memberkey string, memberid string, uid string, email string, fullname string, sex string,
 birthday string, province string, area string, edulevel string,
 unitname string, logdata string, domain string)
STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
WITH SERDEPROPERTIES
-- hbase.columns.mapping must be a single whitespace-free string: any spaces or
-- newlines inside the value become part of the HBase column-family:qualifier
-- names and silently break the mapping. ":key#b" maps memberkey to the row key.
("hbase.columns.mapping" = ":key#b,info:memberid,info:uid,info:email,info:fullname,info:sex,info:birthday,info:province,info:area,info:edulevel,info:unitname,info:logdata,info:domain")
TBLPROPERTIES ("hbase.table.name" = "tmp_member_info_new_mysql");
-- Step 2: load data into the Hive/HBase table.
-- rowkey = lpad(uid, 10, '0') + '_' + memberid (uid first, zero-padded, so keys sort by uid)
-- Populate the HBase-backed table from the staging table. The rowkey is
-- lpad(uid,10,'0') + '_' + memberid so rows sort by zero-padded uid.
-- (A scraper spam line had been injected into the middle of this statement,
-- making it invalid SQL; it has been removed.)
insert overwrite table tmp_member_info_new_mysql
select concat_ws('_', lpad(uid, 10, '0'), memberid) as memberkey,
       memberid, uid, email, fullname, sex,
       birthday, province, area, edulevel, unitname, logdata, domain
from order.tmp_member_info_new;
-- Step 3: export to HDFS directories (create the target first: hadoop fs -mkdir /user/cdel/tmp_member_info_new_mysql)
-- Write all rows logged on or before 2015-04-20 to a dated HDFS directory
-- for the subsequent Sqoop export.
insert overwrite directory '/user/cdel/tmp_member_info_new_mysql_20150420'
select me.memberid, me.uid, me.email, me.fullname, me.sex, me.birthday,
       me.province, me.area, me.edulevel, me.unitname, me.logdata, me.domain
from order.tmp_member_info_new_mysql me
where me.logdata <= '20150420';
-- Write all rows logged on or after 2015-04-21 to a second dated HDFS
-- directory (the complement of the 20150420 export).
insert overwrite directory '/user/cdel/tmp_member_info_new_mysql_20150421'
select me.memberid, me.uid, me.email, me.fullname, me.sex, me.birthday,
       me.province, me.area, me.edulevel, me.unitname, me.logdata, me.domain
from order.tmp_member_info_new_mysql me
where me.logdata >= '20150421';
-- Step 4: export from HDFS to MySQL with Sqoop.
# Sqoop export: HDFS dir -> MySQL table member_info_new.
# Hive's INSERT OVERWRITE DIRECTORY writes \001 (Ctrl-A) as the default field
# delimiter; the original "' 01'" was a scrape-garbled '\001'. The command was
# also split over two lines without a shell continuation, so it would not run.
# NOTE(review): --password on the command line is visible in `ps` and shell
# history; prefer --password-file or -P in production.
sqoop export \
  --connect jdbc:mysql://10.223.0.61:3306/cdellog \
  --username cdel --password hdcdel20141217 \
  -m 2 \
  --table member_info_new \
  --columns "memberid,uid,email,fullname,sex,birthday,province,area,edulevel,unitname,logdata,domain" \
  --export-dir /user/cdel/tmp_member_info_new_mysql \
  --fields-terminated-by '\001'
# Sqoop export of the <=20150420 partition directory to MySQL.
# Fixed: added shell line continuations and restored the Hive default field
# delimiter '\001' (the original "' 01'" was scrape-garbled).
sqoop export \
  --connect jdbc:mysql://10.223.0.61:3306/cdellog \
  --username cdel --password hdcdel20141217 \
  -m 2 \
  --table member_info_new \
  --columns "memberid,uid,email,fullname,sex,birthday,province,area,edulevel,unitname,logdata,domain" \
  --export-dir /user/cdel/tmp_member_info_new_mysql_20150420 \
  --fields-terminated-by '\001'
# Sqoop export of the >=20150421 partition directory to MySQL.
# Fixed: added shell line continuations and restored the Hive default field
# delimiter '\001' (the original "' 01'" was scrape-garbled).
sqoop export \
  --connect jdbc:mysql://10.223.0.61:3306/cdellog \
  --username cdel --password hdcdel20141217 \
  -m 2 \
  --table member_info_new \
  --columns "memberid,uid,email,fullname,sex,birthday,province,area,edulevel,unitname,logdata,domain" \
  --export-dir /user/cdel/tmp_member_info_new_mysql_20150421 \
  --fields-terminated-by '\001'
-- Pick the latest logdata per member (presumably a dedup step before export —
-- TODO confirm intended use; the original statement was unterminated).
-- Fixed: max() with non-aggregated columns is invalid without a GROUP BY, so
-- every other selected column is grouped; the terminating semicolon was added.
select me.memberid, me.uid, me.email, me.fullname, me.sex, me.birthday,
       me.province, me.area, me.edulevel, me.unitname,
       max(me.logdata) as logdata, me.domain
from order.tmp_member_info_new me
where me.uid is not null and me.uid != ''
group by me.memberid, me.uid, me.email, me.fullname, me.sex, me.birthday,
         me.province, me.area, me.edulevel, me.unitname, me.domain;
-- (Scraper residue from the source web page, commented out and kept for provenance;
--  the duplicated URL prefix has been repaired.)
-- 更多:mysql表名大小写混合Hive—Hbase—Sqoop—Mysql
-- https://www.002pc.com/mysql/1059.html
-- 你可能感兴趣的Hbase,Sqoop,Hive,Mysql
-- No alive nodes found in your cluster