hadoop自带RandomWriter例子解析

本文详细解析了Hadoop自带的RandomWriter示例,包括配置、运行过程以及输出结果。通过10个Map任务,将数据写入HDFS,最终在/user/hadoop/output目录下生成输出文件,记录了每个任务的字节数和记录数。
摘要由CSDN通过智能技术生成

步骤:

                   在eclipse中运行

                   右键 --> Run As --> Run Configurations

                   在Program arguments中添加:/user/hadoop/output(此处随你自己修改)

代码:

/**

 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */


package org.apache.hadoop.examples;


import java.io.IOException;
import java.util.Date;
import java.util.Random;


import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.hadoop.mapred.lib.IdentityReducer;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;


/**
 * This program uses map/reduce to just run a distributed job where there is no
 * interaction between the tasks and each task write a large unsorted random
 * binary sequence file of BytesWritable. In order for this program to generate
 * data for terasort with 10-byte keys and 90-byte values, have the following
 * config: <xmp> <?xml version="1.0"?> <?xml-stylesheet type="text/xsl"
 * href="configuration.xsl"?> <configuration> <property>
 * <name>test.randomwrite.min_key</name> <value>10</value> </property>
 * <property> <name>test.randomwrite.max_key</name> <value>10</value>
 * </property> <property> <name>test.randomwrite.min_value</name>
 * <value>90</value> </property> <property>
 * <name>test.randomwrite.max_value</name> <value>90</value> </property>
 * <property> <name>test.randomwrite.total_bytes</name>
 * <value>1099511627776</value> </property> </configuration></xmp>
 * 
 * Equivalently, {@link RandomWriter} also supports all the above options and
 * ones supported by {@link GenericOptionsParser} via the command-line.
 */
public class RandomWriter extends Configured implements Tool {


/**
 * User-defined counters, aggregated by the MapReduce framework across all
 * map tasks of the job:
 * RECORDS_WRITTEN - total number of (key, value) records emitted,
 * BYTES_WRITTEN   - total number of raw bytes emitted.
 * NOTE(review): the code that increments these counters (presumably via
 * Reporter.incrCounter in the map task) is outside this excerpt - confirm
 * against the full source.
 */
static enum Counters {
RECORDS_WRITTEN, BYTES_WRITTEN
}


/**
* A custom input format that creates virtual inputs of a single string for
* each map.
*/
static class RandomInputFormat extends Configured implements
InputFormat<Text, Text> {
/**
* Generate the requested number of file splits, with the filename set
* to the filename of the output file.
*/
public InputSplit[] getSplits(JobConf job, int numSplits)
throws IOException {
/**  设置输入分片的个数  在run方法中设置的Map数量只是建议性质的 Map的数量是由InputFormat的getSplits方法返回的数组个数决定的  所以在这里进行设置**/
JobClient client = new JobClient(job);
ClusterStatus cluster = client.getClusterStatus();
/** 如果属性不存在 则返回默认的值 **/
int numMapsPerHost = job.getInt("test.randomwriter.maps_per_host",
10);
long numBytesToWritePerMap = job.getLong(
"test.randomwrite.bytes_per_map", 1 * 1024 * 1024 * 1024);
if (numBytesToWritePerMap == 0) {
System.err
.println("Cannot have test.randomwrite.bytes_per_map set to 0");
}
long totalBytesToWrite = job
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值