Fixing Hadoop's Inability to Handle Chinese Text

Hadoop's default encoding is UTF-8, and that encoding is hard-coded into its text output path, so to emit Chinese text in another encoding (such as GBK) we need to write our own OutputFormat class. The method is:
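For context, the charset is pinned inside TextOutputFormat's nested LineRecordWriter, roughly as shown below (paraphrased from the Hadoop 1.x source; the exact code may differ between versions). Changing this one constant is the essence of the fix:

// Inside org.apache.hadoop.mapreduce.lib.output.TextOutputFormat.LineRecordWriter:
private static final String utf8 = "UTF-8"; // used to encode separators, newlines, and non-Text values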

1. Create a new class, GBKFileOutputFormat, with the following code:
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.ReflectionUtils;
 
/** An {@link OutputFormat} that writes plain text files in GBK. */
public class GBKFileOutputFormat<K, V> extends FileOutputFormat<K, V> {
  // Modified copy of TextOutputFormat.LineRecordWriter
  // (TextOutputFormat, not TextInputFormat, is the default output format).
  protected static class LineRecordWriter<K, V>
      extends RecordWriter<K, V> {
    // The field keeps its original name, but now holds "GBK" instead of
    // the hard-coded "UTF-8".
    private static final String utf8 = "GBK";
    private static final byte[] newline; // line-terminator bytes in the target encoding
    static {  
      try {  
        newline = "\n".getBytes(utf8);  
      } catch (UnsupportedEncodingException uee) {  
        throw new IllegalArgumentException("can't find " + utf8 + " encoding");  
      }  
    }  
 
    protected DataOutputStream out;
    private final byte[] keyValueSeparator; // separator between key and value; a tab by default
 
    public LineRecordWriter(DataOutputStream out, String keyValueSeparator) { // initialize the output stream and the separator
      this.out = out;  
      try {  
        this.keyValueSeparator = keyValueSeparator.getBytes(utf8);  
      } catch (UnsupportedEncodingException uee) {  
        throw new IllegalArgumentException("can't find " + utf8 + " encoding");  
      }  
    }  
 
    public LineRecordWriter(DataOutputStream out) { // default separator is a tab
      this(out, "\t");  
    }  
 
    /**
     * Write the object to the byte stream, handling Text as a special case.
     * @param o the object to print
     * @throws IOException if the write throws, we pass it on
     */
    private void writeObject(Object o) throws IOException {
      if (o instanceof Text) {
        // Text stores its content as UTF-8 bytes, so re-encode via toString();
        // writing to.getBytes() directly (as TextOutputFormat does) would emit
        // UTF-8 and defeat the purpose of this class.
        Text to = (Text) o;
        out.write(to.toString().getBytes(utf8));
      } else {
        out.write(o.toString().getBytes(utf8));
      }
    }
 
    public synchronized void write(K key, V value) // synchronized: writes are mutually exclusive
      throws IOException {
      // A null key or value, or a NullWritable, is simply skipped.
      boolean nullKey = key == null || key instanceof NullWritable;
      boolean nullValue = value == null || value instanceof NullWritable;
      if (nullKey && nullValue) {
        return;  
      }  
      if (!nullKey) {  
        writeObject(key);  
      }  
      if (!(nullKey || nullValue)) {  
        out.write(keyValueSeparator);  
      }  
      if (!nullValue) {  
        writeObject(value);  
      }  
      out.write(newline);  
    }  
 
    public synchronized void close(TaskAttemptContext context) throws IOException {
      out.close();  
    }  
  }  
 
  public RecordWriter<K, V> getRecordWriter(TaskAttemptContext job) // called by the framework to obtain a writer per task
                         throws IOException, InterruptedException {
    Configuration conf = job.getConfiguration();
    boolean isCompressed = getCompressOutput(job);
    String keyValueSeparator = conf.get("mapred.textoutputformat.separator",
                                        "\t");
    CompressionCodec codec = null; // codec for compressed output, if enabled
    String extension = "";  
    if (isCompressed) {  
      Class<? extends CompressionCodec> codecClass =   
        getOutputCompressorClass(job, GzipCodec.class);  
      codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, conf);  
      extension = codec.getDefaultExtension();  
    }  
    Path file = getDefaultWorkFile(job, extension); // default work file path and name; implemented in FileOutputFormat
    FileSystem fs = file.getFileSystem(conf);  
    if (!isCompressed) {  
      FSDataOutputStream fileOut = fs.create(file, false);  
      return new LineRecordWriter<K, V>(fileOut, keyValueSeparator);  
    } else {  
      FSDataOutputStream fileOut = fs.create(file, false);  
      return new LineRecordWriter<K, V>(new DataOutputStream  
                                        (codec.createOutputStream(fileOut)),  
                                        keyValueSeparator);  
    }  
  }  
}  

The class is a modified copy of TextOutputFormat from the Hadoop source. One thing to watch: the FileOutputFormat it extends must be the one in the org.apache.hadoop.mapreduce.lib.output package (the new MapReduce API), not the one in org.apache.hadoop.mapred.

2. In the driver (main) class, add job.setOutputFormatClass(GBKFileOutputFormat.class); as shown in the sketch below.
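For example, a minimal map-only driver might look like the following sketch. GBKOutputDemo and PassThroughMapper are hypothetical names, not part of the original article; new Job(conf, name) matches the older API used in step 1 (on Hadoop 2.x and later, Job.getInstance(conf) is preferred).

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

public class GBKOutputDemo {

  /** Drops the byte-offset key so only the line content is written. */
  public static class PassThroughMapper
      extends Mapper<LongWritable, Text, NullWritable, Text> {
    @Override
    protected void map(LongWritable key, Text value, Context context)
        throws IOException, InterruptedException {
      // The GBK LineRecordWriter skips NullWritable keys entirely.
      context.write(NullWritable.get(), value);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = new Job(conf, "gbk output demo");
    job.setJarByClass(GBKOutputDemo.class);

    job.setMapperClass(PassThroughMapper.class);
    job.setNumReduceTasks(0); // map-only: records go straight to the OutputFormat
    job.setOutputKeyClass(NullWritable.class);
    job.setOutputValueClass(Text.class);

    job.setInputFormatClass(TextInputFormat.class);
    // The step this article is about: replace the default TextOutputFormat
    // with the GBK-encoding class defined in step 1.
    job.setOutputFormatClass(GBKFileOutputFormat.class);

    FileInputFormat.addInputPath(job, new Path(args[0]));
    GBKFileOutputFormat.setOutputPath(job, new Path(args[1])); // static helper inherited from FileOutputFormat

    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}

Because the mapper emits NullWritable keys, the writer outputs only each value followed by a newline, so the job simply re-encodes its text input to GBK.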