去双引号:
# Strip all double quotes from the file in place.
# Original had broken quoting — "s/"//g" ends the double-quoted string at the
# first inner quote; single-quote the script instead.
sed -i 's/"//g' textName
# Known-working alternative noted in the original (replaces spaces with commas):
perl -p -i -e "s/ /,/g" ./wuhan_feiy_end_result.csv
-- Quick probe: derive age as visit year minus birth year.
-- NOTE(review): assumes birth_date holds a 4-digit year string — confirm;
-- the computed column was unaliased (Hive would name it _c3).
SELECT patient_sn,
       year,
       sex,
       cast(year AS int) - cast(birth_date AS int) AS age
FROM tableName
LIMIT 10;
-- Aggregate fees per patient/visit. trim() is applied to every grouping key
-- because the raw fields carry stray spaces that would otherwise split one
-- logical group into several distinct groups.
SELECT trim(patient_sn) AS patient_sn,
       trim(visit_type) AS visit_type,
       trim(year)       AS year,
       trim(sex)        AS sex,
       trim(age)        AS age,
       sum(fee_sum)     AS fee_total
FROM tableName
GROUP BY trim(patient_sn), trim(visit_type), trim(year), trim(sex), trim(age)
ORDER BY patient_sn, year, sex, age, visit_type;
--结论:可能是 分组数据存在空格导致分组无效,使用trim()函数解决。
建表语句, 加载csv文件:
-- Table over a quoted CSV file; OpenCSVSerde handles embedded quotes/escapes.
-- Original statement was incomplete: missing STORED AS TEXTFILE and the ';'.
-- NOTE: OpenCSVSerde exposes every column as STRING regardless of declared type.
CREATE TABLE tableName (JZJLH string, SDYWH string)
ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.OpenCSVSerde'
WITH SERDEPROPERTIES (
    "separatorChar" = ",",
    "quoteChar"     = "\"",
    "escapeChar"    = "\\"
)
STORED AS TEXTFILE;
--建表语句模板:
-- Template: plain delimited text table.
-- FIX: 'fields terminated by' accepts a single character only — the original
-- '","' is invalid. For CSV files whose fields are quoted, use OpenCSVSerde
-- instead of this template.
create table tableName (JZJLH string, SDYWH string)
row format delimited
fields terminated by ','
lines terminated by '\n'
stored as textfile;
加载数据到表中:
-- Original note was truncated: only "OVERWRITE INTO TABLE tableName;" survived.
-- Reconstructed full LOAD statement — replace the placeholder path before use.
LOAD DATA LOCAL INPATH '/path/to/data.csv'
OVERWRITE INTO TABLE tableName;
load本地新文件到hive:utf-8
-- Load a local UTF-8 file into a Hive table.
-- FIX: INPATH requires a quoted string literal, not a bare word.
load data local inpath '/path/to/file' overwrite into table tableName;
查询结果只生成一个文件:(部分字段)导出到本地
# Export selected columns as comma-joined lines into one local file.
# FIX: dropped the stray trailing '|' (it would have left the shell waiting
# for another command in the pipeline).
hive -e "SELECT concat(jzjlh,',',fldmmc,',',zje) FROM zhangheng.tableName;" > /opt/textName
-- Export an aggregated result to a local directory as comma-delimited text.
-- NOTE: ORDER BY forces a single reducer — acceptable only for small results.
-- FIX: dropped the stray trailing '|'; aliased the aggregate.
INSERT OVERWRITE LOCAL DIRECTORY '/opt/zhangheng'
ROW FORMAT DELIMITED FIELDS TERMINATED BY ","
SELECT jzjlh,
       fyrq,
       fldm,
       sum(cast(zje AS double)) AS zje_sum,
       fldmmc
FROM tableName
GROUP BY jzjlh, fyrq, fldm, fldmmc
ORDER BY jzjlh DESC, fldm DESC, fyrq DESC;
查询结果插入hive表:
-- Write the aggregated result into a Hive table.
-- WARNING(review): source and target are the SAME table here — INSERT OVERWRITE
-- replaces its own input; double-check the target table name before running.
-- FIX: dropped the stray trailing '|'; cast zje to double (consistent with the
-- directory-export query, where zje is a string column); aliased aggregates.
INSERT OVERWRITE TABLE tableName
SELECT jzjlh,
       fyrq,
       fldm,
       sum(cast(zje AS double)) AS zje_sum,
       fldmmc,
       count(zje) AS zje_cnt
FROM tableName
GROUP BY jzjlh, fyrq, fldm, fldmmc
ORDER BY jzjlh DESC, fldm DESC, fyrq DESC;