Hive 的默认分隔符为:
字段:\001
行:\n
当数据中包含了例如 \001、\n 时,就需要自定义分隔符了。并且,自定义的分隔符要为多字符,因为单字符在数据中出现的概率太高,容易与真实数据冲突。
Hive 的 MultiDelimitSerDe 只支持字段的多字符分隔,行的换行符不支持自定义修改。
这里重写 TextInputFormat ,用来支持行的多字符分隔。
依赖
<dependencies>
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-exec</artifactId>
<version>2.3.5</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>2.8.5</version>
<scope>provided</scope>
</dependency>
</dependencies>
核心代码
修改参数 textinputformat.record.delimiter 为 cn.knx.textinputformat.record.delimiter,避免影响其他任务。
@SuppressWarnings("all")
public class UDTextInputFormat extends FileInputFormat<LongWritable, Text> implements JobConfigurable {
private CompressionCodecFactory compressionCodecs = null;
public UDTextInputFormat() {
}
public void configure(JobConf conf) {
this.compressionCodecs = new CompressionCodecFactory(conf);
}
protected boolean isSplitable(FileSystem fs, Path file) {
CompressionCodec codec = this.compressionCodecs.getCodec(file);
return null == codec ? true : codec instanceof SplittableCompressionCodec;
}
public RecordReader<LongWritable, Text> getRecordReader(InputSplit genericSplit, JobConf job, Reporter reporter) throws IOException {
reporter.setStatus(genericSplit.toString());
String delimiter = job.get("cn.knx.textinputformat.record.delimiter");
byte[] recordDelimiterBytes = null;
if (null != delimiter) {
recordDelimiterBytes = delimiter.getBytes(Charsets.UTF_8);
}
return new LineRecordReader(job, (FileSplit)genericSplit, recordDelimiterBytes);
}
}
Hive 的使用方式
-- Register the jar containing cn.knx.udserde.hadoop.UDTextInputFormat for this session.
add jar hdfs:///user/survey/UDSerde-0.0.1-SNAPSHOT.jar;
-- Rebuild the demo table from scratch.
drop table test.ConfirmEmail;
-- Session-level alternative to the TBLPROPERTIES entry below (commented out here):
-- set cn.knx.textinputformat.record.delimiter=@#$%n;
-- MultiDelimitSerDe handles the multi-character FIELD delimiter ('field.delim'),
-- while the custom InputFormat handles the multi-character ROW delimiter
-- ('cn.knx.textinputformat.record.delimiter'). The values '@#$%t' / '@#$%n' are
-- presumably the literal character sequences used in the data file — TODO confirm.
create table test.ConfirmEmail(
id string,
Email string,
ConfirmCode string,
CreateDate string,
FailureDate string,
ConfirmDate string
)
ROW FORMAT SERDE 'org.apache.hadoop.hive.contrib.serde2.MultiDelimitSerDe'
WITH SERDEPROPERTIES ('field.delim'='@#$%t')
STORED AS INPUTFORMAT 'cn.knx.udserde.hadoop.UDTextInputFormat'
OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
TBLPROPERTIES('cn.knx.textinputformat.record.delimiter'='@#$%n')
;
-- NOTE(review): TRUNCATE is redundant here since the LOAD below uses OVERWRITE.
TRUNCATE table test.ConfirmEmail;
-- Move the source file into the table's storage and replace existing contents.
load data inpath "/user/survey/ConfirmEmail_v3.txt" overwrite into table test.ConfirmEmail;
-- Verify that rows and fields were split on the custom delimiters.
SELECT * from test.ConfirmEmail;