Running Hadoop jobs

Prerequisite 1: start Hadoop
sh start-dfs.sh
sh start-yarn.sh
log:/appl/hadoop-2.7.0/logs
jps should show: NameNode, DataNode, SecondaryNameNode, ResourceManager, NodeManager
Verify: http://192.168.56.250:8088/cluster (YARN ResourceManager web UI)
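The HDFS NameNode web UI (port 50070 by default in Hadoop 2.x) is another quick sanity check: http://192.168.56.250:50070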

Prerequisite 2: hadoop fs commands
hadoop fs -put localfile /user/hadoop/hadoopfile
hadoop fs -ls /user/hadoop/file1
hadoop fs -ls hdfs://localhost:9000/
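
Paths without a scheme resolve against fs.defaultFS in core-site.xml; assuming this cluster's default is hdfs://localhost:9000 (as the job command further below suggests), the two forms are interchangeable:
hadoop fs -ls /user/hadoop
hadoop fs -ls hdfs://localhost:9000/user/hadoop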

1. WordCount

Java libraries: the Hadoop client jars shipped with the install (e.g. hadoop-common and hadoop-mapreduce-client-core under /appl/hadoop-2.7.0/share/hadoop)


Java source

import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class MrTest01 {

	// Mapper: emits (word, 1) for every whitespace-delimited token of each input line
	public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable> {
		private final static IntWritable one = new IntWritable(1);
		private Text word = new Text();
		public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
			StringTokenizer itr = new StringTokenizer(value.toString());
			while (itr.hasMoreTokens()) {
				word.set(itr.nextToken());
				context.write(word, one);
			}
		}
	}

	// Reducer (also used as the combiner): sums the counts for each word
	public static class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
		private IntWritable result = new IntWritable();
		public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
			int sum = 0;
			for (IntWritable val : values) {
				sum += val.get();
			}
			result.set(sum);
			context.write(key, result);
		}
	}
		
	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
		if (otherArgs.length != 2) {
			System.err.println("Usage: wordcount <in> <out>");
			System.exit(2);
		}

		Job job = Job.getInstance(conf, "word count");
		job.setJarByClass(MrTest01.class);
		job.setMapperClass(TokenizerMapper.class);
		// summing is associative, so the reducer doubles as a map-side combiner
		job.setCombinerClass(IntSumReducer.class);
		job.setReducerClass(IntSumReducer.class);
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(IntWritable.class);
		FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
		FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
		System.exit(job.waitForCompletion(true) ? 0 : 1);
	}
}
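
An illustrative trace: for an input line "a a b", the mapper emits (a,1) (a,1) (b,1); the combiner (the same IntSumReducer, run on each map task's local output) folds that to (a,2) (b,1); the reducer then sums across all map tasks. In the counters below, Combine input records and Combine output records are both 11 because every word in this input appears exactly once, so the combiner has nothing to merge.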

Packaging: http://www.aboutyun.com/thread-7408-1-1.html (a plain jar is enough; this example does not need an executable jar)
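
If you would rather package from the command line than an IDE, something like the following should work (a sketch; `hadoop classpath` prints the client classpath in Hadoop 2.x):
javac -classpath "$(hadoop classpath)" -d classes MrTest01.java
jar cvf MrTest01.jar -C classes .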


# start Hadoop

sh start-dfs.sh
sh start-yarn.sh
# log:/appl/hadoop-2.7.0/logs

# hadoop fs -put <local file> <hdfs dir>
hadoop fs -put /appl/hadoop-2.7.0/NOTICE.txt /test/
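
# if the -put fails because /test does not exist yet, create it first
hadoop fs -mkdir -p /test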

# hadoop jar xxx.jar [arg0 arg1 ...]
hadoop jar /mk/test/MrTest01.jar hdfs://localhost:9000/test/NOTICE.txt hdfs://localhost:9000/test02/
[root@centos1 current]# hadoop jar /mk/test/MrTest01.jar hdfs://localhost:9000/test/NOTICE.txt hdfs://localhost:9000/test02
16/03/22 18:10:31 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
16/03/22 18:10:32 INFO client.RMProxy: Connecting to ResourceManager at /0.0.0.0:8032
16/03/22 18:10:34 INFO input.FileInputFormat: Total input paths to process : 1
16/03/22 18:10:34 INFO mapreduce.JobSubmitter: number of splits:1
16/03/22 18:10:34 INFO mapreduce.JobSubmitter: Submitting tokens for job: job_1458639126126_0001
16/03/22 18:10:35 INFO impl.YarnClientImpl: Submitted application application_1458639126126_0001
16/03/22 18:10:35 INFO mapreduce.Job: The url to track the job: http://centos1:8088/proxy/application_1458639126126_0001/
16/03/22 18:10:35 INFO mapreduce.Job: Running job: job_1458639126126_0001
16/03/22 18:10:46 INFO mapreduce.Job: Job job_1458639126126_0001 running in uber mode : false
16/03/22 18:10:46 INFO mapreduce.Job:  map 0% reduce 0%
16/03/22 18:10:53 INFO mapreduce.Job:  map 100% reduce 0%
16/03/22 18:11:02 INFO mapreduce.Job:  map 100% reduce 100%
16/03/22 18:11:02 INFO mapreduce.Job: Job job_1458639126126_0001 completed successfully
16/03/22 18:11:03 INFO mapreduce.Job: Counters: 49
	File System Counters
		FILE: Number of bytes read=173
		FILE: Number of bytes written=229659
		FILE: Number of read operations=0
		FILE: Number of large read operations=0
		FILE: Number of write operations=0
		HDFS: Number of bytes read=203
		HDFS: Number of bytes written=123
		HDFS: Number of read operations=6
		HDFS: Number of large read operations=0
		HDFS: Number of write operations=2
	Job Counters 
		Launched map tasks=1
		Launched reduce tasks=1
		Data-local map tasks=1
		Total time spent by all maps in occupied slots (ms)=5612
		Total time spent by all reduces in occupied slots (ms)=6218
		Total time spent by all map tasks (ms)=5612
		Total time spent by all reduce tasks (ms)=6218
		Total vcore-seconds taken by all map tasks=5612
		Total vcore-seconds taken by all reduce tasks=6218
		Total megabyte-seconds taken by all map tasks=5746688
		Total megabyte-seconds taken by all reduce tasks=6367232
	Map-Reduce Framework
		Map input records=2
		Map output records=11
		Map output bytes=145
		Map output materialized bytes=173
		Input split bytes=102
		Combine input records=11
		Combine output records=11
		Reduce input groups=11
		Reduce shuffle bytes=173
		Reduce input records=11
		Reduce output records=11
		Spilled Records=22
		Shuffled Maps =1
		Failed Shuffles=0
		Merged Map outputs=1
		GC time elapsed (ms)=126
		CPU time spent (ms)=1480
		Physical memory (bytes) snapshot=322916352
		Virtual memory (bytes) snapshot=2383241216
		Total committed heap usage (bytes)=164630528
	Shuffle Errors
		BAD_ID=0
		CONNECTION=0
		IO_ERROR=0
		WRONG_LENGTH=0
		WRONG_MAP=0
		WRONG_REDUCE=0
	File Input Format Counters 
		Bytes Read=101
	File Output Format Counters 
		Bytes Written=123

hadoop fs -ls /test02/
-rw-r--r--   3 root supergroup          0 2016-03-22 18:11 /test02/_SUCCESS
-rw-r--r--   3 root supergroup        123 2016-03-22 18:11 /test02/part-r-00000
hadoop fs -cat /test02/part-r-00000
[root@centos1 current]# hadoop fs -cat /test02/part-r-00000
16/03/22 18:12:37 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
(http://www.apache.org/).	1
Apache	1
Foundation	1
Software	1
The	1
This	1
by	1
developed	1
includes	1
product	1
software	1
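
Note: FileOutputFormat fails the job if the output directory already exists, so clear it before re-running:
hadoop fs -rm -r /test02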


Including third-party jars

/appl/hadoop-2.7.0/etc/hadoop/hadoop-env.sh
export HADOOP_CLASSPATH=/appl/elasticsearch-hadoop-2.1.2/dist/elasticsearch-hadoop-2.1.2.jar
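
Since the driver runs its arguments through GenericOptionsParser, passing -libjars at submit time is an alternative worth knowing: HADOOP_CLASSPATH as set above typically only puts the jar on the client JVM's classpath, while -libjars also ships it to the map and reduce tasks. A sketch:
hadoop jar /mk/test/MrTest01.jar -libjars /appl/elasticsearch-hadoop-2.1.2/dist/elasticsearch-hadoop-2.1.2.jar hdfs://localhost:9000/test/NOTICE.txt hdfs://localhost:9000/test02/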

Reference:
http://www.aboutyun.com/thread-7408-1-1.html
