Spark: Custom Output File Formats

MyTextOutputFormat.java
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ReflectionUtils;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;


public class MyTextOutputFormat<K, V> extends FileOutputFormat<K, V> {

    protected static class LineRecordWriter<K, V>
            implements RecordWriter<K, V> {
        private static final String utf8 = "UTF-8";
        private static final byte[] newline;
        static {
            try {
                // line terminator, as in Hadoop's own TextOutputFormat
                newline = "\n".getBytes(utf8);
            } catch (UnsupportedEncodingException uee) {
                throw new IllegalArgumentException("can't find " + utf8 + " encoding");
            }
        }

        protected DataOutputStream out;
        private final byte[] keyValueSeparator;

        public LineRecordWriter(DataOutputStream out, String keyValueSeparator) {
            this.out = out;
            try {
                this.keyValueSeparator = keyValueSeparator.getBytes(utf8);
            } catch (UnsupportedEncodingException uee) {
                throw new IllegalArgumentException("can't find " + utf8 + " encoding");
            }
        }

        public LineRecordWriter(DataOutputStream out) {
            this(out, "\t");
        }

        /**
         * Write the object to the byte stream, handling Text as a special
         * case.
         * @param o the object to print
         * @throws IOException if the write throws, we pass it on
         */
        private void writeObject(Object o) throws IOException {
            if (o instanceof Text) {
                Text to = (Text) o;
                out.write(to.getBytes(), 0, to.getLength());
            } else {
                out.write(o.toString().getBytes(utf8));
            }
        }

        public synchronized void write(K key, V value)
                throws IOException {

            boolean nullKey = key == null || key instanceof NullWritable;
            boolean nullValue = value == null || value instanceof NullWritable;
            if (nullKey && nullValue) {
                return;
            }
            if (!nullKey) {
                writeObject(key);
            }
            if (!(nullKey || nullValue)) {
                out.write(keyValueSeparator);
            }
            if (!nullValue) {
                writeObject(value);
            }
            out.write(newline);
        }

        public synchronized void close(Reporter reporter) throws IOException {
            out.close();
        }
    }

    public RecordWriter<K, V> getRecordWriter(FileSystem ignored,
                                              JobConf job,
                                              String name,
                                              Progressable progress)
            throws IOException {
        boolean isCompressed = getCompressOutput(job);
        String keyValueSeparator = job.get("mapreduce.output.textoutputformat.separator",
                "\t");
        if (!isCompressed) {
            Path file = FileOutputFormat.getTaskOutputPath(job, name);
            FileSystem fs = file.getFileSystem(job);
            FSDataOutputStream fileOut = fs.create(file, progress);
            return new MyTextOutputFormat.LineRecordWriter<K, V>(fileOut, keyValueSeparator);
        } else {
            Class<? extends CompressionCodec> codecClass =
                    getOutputCompressorClass(job, GzipCodec.class);
            // create the named codec
            CompressionCodec codec = ReflectionUtils.newInstance(codecClass, job);
            // build the filename including the extension
            Path file =
                    FileOutputFormat.getTaskOutputPath(job,
                            name + codec.getDefaultExtension());
            FileSystem fs = file.getFileSystem(job);
            FSDataOutputStream fileOut = fs.create(file, progress);
            return new MyTextOutputFormat.LineRecordWriter<K, V>(
                    new DataOutputStream(codec.createOutputStream(fileOut)),
                    keyValueSeparator);
        }
    }
}
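
Note that getRecordWriter reads the key/value separator from the mapreduce.output.textoutputformat.separator property and honors Hadoop's standard compressed-output settings, so both can be driven from the driver side without modifying the class. A minimal sketch of that wiring, assuming an existing JavaSparkContext named sc (this setup is illustrative, not part of the original code):

// Hypothetical driver-side configuration for MyTextOutputFormat.
JobConf jobConf = new JobConf(sc.hadoopConfiguration());
// getRecordWriter falls back to "\t" when this key is absent
jobConf.set("mapreduce.output.textoutputformat.separator", ",");
// optional: exercise the compressed branch of getRecordWriter
FileOutputFormat.setCompressOutput(jobConf, true);
FileOutputFormat.setOutputCompressorClass(jobConf, GzipCodec.class);

The jobConf can then be passed through the saveAsHadoopFile overload that accepts a JobConf.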

MyMultipleTextOutputFormat.java
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.lib.MultipleOutputFormat;
import org.apache.hadoop.util.Progressable;
import java.io.IOException;


@InterfaceAudience.Public
@InterfaceStability.Stable
public class MyMultipleTextOutputFormat<K, V> extends MultipleOutputFormat<K, V> {

    private MyTextOutputFormat<K, V> theTextOutputFormat = null;

    @Override
    protected RecordWriter<K, V> getBaseRecordWriter(FileSystem fs, JobConf job,
                                                     String name, Progressable arg3) throws IOException {
        if (theTextOutputFormat == null) {
            theTextOutputFormat = new MyTextOutputFormat<K,V>();
        }
        return theTextOutputFormat.getRecordWriter(fs, job, name, arg3);
    }
}

CustomOutputFormatDATEmpty.java
public class CustomOutputFormatDATEmpty extends MyMultipleTextOutputFormat<String, OutputDatDto> {
    @Override
    public String generateFileNameForKeyValue(String key, OutputDatDto value, String name) {
        String formname = value.getForm_name();
        String rulename = value.getRule_name();
        return formname + "." + rulename + "." + value.getCob_date() + ".csv";
    }

    @Override
    public String generateActualKey(String key, OutputDatDto value) {
        return null;
    }

    @Override
    protected OutputDatDto generateActualValue(String key, OutputDatDto value) {
        return value;
    }
}

Here, OutputDatDto.java is a custom data class.
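
The post does not show OutputDatDto.java itself. A minimal sketch consistent with the getters used above might look as follows; the field set and the payload idea are assumptions, since only the three getters appear in the original code:

// Hypothetical sketch of the custom data class. Only the three getters
// used by CustomOutputFormatDATEmpty are known from the original post.
public class OutputDatDto implements java.io.Serializable {
    private String form_name;
    private String rule_name;
    private String cob_date;
    private String payload; // assumed: the pre-rendered CSV line for this record

    public String getForm_name() { return form_name; }
    public String getRule_name() { return rule_name; }
    public String getCob_date() { return cob_date; }

    // LineRecordWriter.writeObject() falls back to toString() for non-Text
    // values, so whatever this returns becomes the line in the output file.
    @Override
    public String toString() {
        return payload;
    }
}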

The generateFileNameForKeyValue method defines the format and name of the output files, which is what makes custom file formats and names possible.

1. Custom file extensions: CustomOutputFormatDATEmpty above writes .csv files; .dat and .dml extensions were also tested, but were later dropped for business reasons.

2. Generic CSV output from Spark: if the Dataset is empty, Spark's built-in CSV writer produces no header information at all. To satisfy the business requirement, the original output classes were rewritten so that the output is forced out anyway; a sketch of one possible approach follows the driver snippet below.

JavaPairRDD<String, OutputDatDto> rdd = sc.parallelize(d)
        .mapToPair(dd -> new Tuple2<>("", dd));
rdd.saveAsHadoopFile(csvPath, String.class, OutputDatDto.class, CustomOutputFormatDATEmpty.class);
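
For point 2, one way to force the header line out even when the data itself is empty is to prepend a header record before saving. This is only a sketch under assumptions: buildHeaderDto is a hypothetical helper whose toString() renders the CSV header, and the header record must carry the same form_name / rule_name / cob_date values so that generateFileNameForKeyValue routes it into the same file:

// Hypothetical: prepend a record whose toString() renders the CSV header.
OutputDatDto header = buildHeaderDto(formName, ruleName, cobDate); // assumed helper
JavaPairRDD<String, OutputDatDto> withHeader = sc
        .parallelizePairs(java.util.Collections.singletonList(new Tuple2<>("", header)))
        .union(rdd);
// coalesce(1): generateFileNameForKeyValue ignores the task name, so two
// tasks must not race to create the same custom-named file.
withHeader.coalesce(1).saveAsHadoopFile(
        csvPath, String.class, OutputDatDto.class, CustomOutputFormatDATEmpty.class);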