场景:hdfs文件是以\001分割的字段,测试hive是否可以按\001切分字段
--示例文件如下(以\001作为分隔符):
cat -A test.txt
23782410120230001^A000^A2019-03-25 15:09:00.0
12345610120230010^A000^A2023-03-10 21:06:00.0
# Create the table directory on HDFS, then upload the sample file (-f overwrites any existing copy).
# NOTE(review): consider `-mkdir -p` so the command also succeeds when parent directories are missing — confirm against the cluster layout.
hadoop fs -mkdir hdfs://hacluster/user/hive/warehouse/test_db.db/test_delimiter
hadoop fs -put -f test.txt hdfs://hacluster/user/hive/warehouse/test_db.db/test_delimiter
--加载数据:Hive默认字段分隔符即为'\001',若沿用默认值整行会被直接切分;这里故意把分隔符设为数据中不会出现的'|',使整行落入单个text字段,便于后续验证split能否按'\001'切分
-- Recreate the demo table from scratch so the test is repeatable.
DROP TABLE IF EXISTS test_db.test_delimiter;

-- External table over the uploaded file. The field delimiter is '|', which never
-- occurs in the sample data, so each full line (including its \001 bytes) lands
-- intact in the single `text` column.
CREATE EXTERNAL TABLE IF NOT EXISTS test_db.test_delimiter (
    text STRING
)
ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
STORED AS TEXTFILE
LOCATION 'hdfs://hacluster/user/hive/warehouse/test_db.db/test_delimiter';
--查询hive表数据
-- Sync the metastore with the table location.
-- NOTE(review): MSCK REPAIR TABLE discovers missing partitions; this table is not
-- partitioned, so the statement is likely a harmless no-op here — confirm it is needed.
msck repair table test_db.test_delimiter;
-- Query the raw rows: the whole line sits in the single `text` column, with the
-- \001 bytes preserved inside the string (they render invisibly in the output below).
select * from test_db.test_delimiter;
+--------------------------------------------+
| test_delimiter.text |
+--------------------------------------------+
| 237824101202300010002019-03-25 15:09:00.0 |
| 123456101202300100002023-03-10 21:06:00.0 |
+--------------------------------------------+
2 rows selected (4.801 seconds)
-- Both escape spellings split the field correctly; first the octal form '\001'.
select split(text,'\001') from test_db.test_delimiter;
+------------------------------------------------------+
| _c0 |
+------------------------------------------------------+
| ["23782410120230001","000","2019-03-25 15:09:00.0"] |
| ["12345610120230010","000","2023-03-10 21:06:00.0"] |
+------------------------------------------------------+
2 rows selected (25.6 seconds)
select split(text,'\u0001') from test_db.test_delimiter;
+------------------------------------------------------+
| _c0 |
+------------------------------------------------------+
| ["23782410120230001","000","2019-03-25 15:09:00.0"] |
| ["12345610120230010","000","2023-03-10 21:06:00.0"] |
+------------------------------------------------------+
2 rows selected (23.134 seconds)
-- Take the first field of the split result (array index is 0-based).
select split(text,'\001')[0] from test_db.test_delimiter;
+--------------------+
| _c0 |
+--------------------+
| 23782410120230001 |
| 12345610120230010 |
+--------------------+
2 rows selected (25.088 seconds)