Hadoop Word Count (Part 1): Running in Cluster Mode

Maven pom.xml:

<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
	<modelVersion>4.0.0</modelVersion>
	<groupId>HadoopStu</groupId>
	<artifactId>HadoopStu</artifactId>
	<version>0.0.1-SNAPSHOT</version>
	<build>
		<sourceDirectory>src</sourceDirectory>
		<resources>
			<resource>
				<directory>src</directory>
				<excludes>
					<exclude>**/*.java</exclude>
				</excludes>
			</resource>
		</resources>
		<plugins>
			<plugin>
				<artifactId>maven-compiler-plugin</artifactId>
				<version>3.3</version>
				<configuration>
					<source>1.8</source>
					<target>1.8</target>
				</configuration>
			</plugin>
		</plugins>
	</build>
	<dependencies>
		<!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-common -->
		<dependency>
			<groupId>org.apache.hadoop</groupId>
			<artifactId>hadoop-common</artifactId>
			<version>2.6.0</version>
		</dependency>
		<!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-mapreduce-client-core -->
		<dependency>
			<groupId>org.apache.hadoop</groupId>
			<artifactId>hadoop-mapreduce-client-core</artifactId>
			<version>2.6.0</version>
		</dependency>
		<!-- https://mvnrepository.com/artifact/junit/junit -->
		<dependency>
			<groupId>junit</groupId>
			<artifactId>junit</artifactId>
			<version>4.11</version>
		</dependency>

	</dependencies>
</project>
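With this pom in place, the job jar can be built with a standard Maven package run; by default the artifact lands under target/ as HadoopStu-0.0.1-SNAPSHOT.jar (the wc.jar used below is presumably this jar renamed):

mvn clean package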



map:

package cn.hadoop.mr;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.util.StringUtils;

public class WCMapper extends Mapper<LongWritable, Text, Text, LongWritable> {

	@Override
	protected void map(LongWritable key, Text value, Context context)
			throws IOException, InterruptedException {
		// key is the byte offset of the line within the file; value is the line itself.
		String line = value.toString();

		// Split on single spaces (this is Hadoop's own StringUtils, not Apache Commons).
		String[] words = StringUtils.split(line, ' ');

		// Emit <word, 1> for every token on the line.
		for (String word : words) {
			context.write(new Text(word), new LongWritable(1));
		}
	}
}
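Allocating a fresh Text and LongWritable for every token works, but it creates garbage on large inputs; Hadoop mappers conventionally reuse writable instances, which is safe because context.write() serializes their contents immediately. A minimal variant of the same class (imports unchanged):

public class WCMapper extends Mapper<LongWritable, Text, Text, LongWritable> {

	// Reused across map() calls; safe because write() copies the serialized bytes.
	private final Text word = new Text();
	private final LongWritable one = new LongWritable(1);

	@Override
	protected void map(LongWritable key, Text value, Context context)
			throws IOException, InterruptedException {
		for (String w : StringUtils.split(value.toString(), ' ')) {
			word.set(w);
			context.write(word, one);
		}
	}
}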

reduce:

package cn.hadoop.mr;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class WCReducer extends Reducer<Text, LongWritable, Text, LongWritable> {

	@Override
	protected void reduce(Text key, Iterable<LongWritable> values, Context context)
			throws IOException, InterruptedException {
		// Sum the 1s emitted by the mappers for this word.
		long count = 0;
		for (LongWritable value : values) {
			count += value.get();
		}
		// Emit <word, total count>.
		context.write(key, new LongWritable(count));
	}
}
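Because this reduction is an associative, commutative sum, the same class can double as a combiner to pre-aggregate map output before the shuffle (the job counters below show Combine input records=0, i.e. no combiner ran in this job). Enabling it would be a single optional line in the driver:

		// Optional: run WCReducer on the map side as a combiner.
		wcjob.setCombinerClass(WCReducer.class);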

run:

package cn.hadoop.mr;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;


public class WCRunner {
	public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {

		Configuration conf = new Configuration();
		Job wcjob = Job.getInstance(conf);

		// Lets Hadoop locate and ship the jar containing this class to the cluster.
		wcjob.setJarByClass(WCRunner.class);

		wcjob.setMapperClass(WCMapper.class);
		wcjob.setReducerClass(WCReducer.class);

		// Types of the final (reducer) output.
		wcjob.setOutputKeyClass(Text.class);
		wcjob.setOutputValueClass(LongWritable.class);

		// Types of the intermediate (mapper) output.
		wcjob.setMapOutputKeyClass(Text.class);
		wcjob.setMapOutputValueClass(LongWritable.class);

		// Input directory on HDFS; the output directory must not exist yet,
		// or the job will fail at submission time.
		FileInputFormat.setInputPaths(wcjob, "/wc/inputdata/");
		FileOutputFormat.setOutputPath(wcjob, new Path("/output/"));

		// Block until the job finishes and report success/failure via the exit code.
		System.exit(wcjob.waitForCompletion(true) ? 0 : 1);
	}
}
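The job log below warns: "Implement the Tool interface and execute your application with ToolRunner." A sketch of how the driver could be restructured to satisfy that warning (the class name WCTool is illustrative, not part of the original project):

package cn.hadoop.mr;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class WCTool extends Configured implements Tool {

	@Override
	public int run(String[] args) throws Exception {
		// getConf() already contains any -D options parsed by ToolRunner.
		Job job = Job.getInstance(getConf());
		job.setJarByClass(WCTool.class);
		job.setMapperClass(WCMapper.class);
		job.setReducerClass(WCReducer.class);
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(LongWritable.class);
		// Paths come from the command line instead of being hard-coded.
		FileInputFormat.setInputPaths(job, new Path(args[0]));
		FileOutputFormat.setOutputPath(job, new Path(args[1]));
		return job.waitForCompletion(true) ? 0 : 1;
	}

	public static void main(String[] args) throws Exception {
		// ToolRunner strips generic options (-D, -files, ...) before calling run().
		System.exit(ToolRunner.run(new Configuration(), new WCTool(), args));
	}
}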
Generate the input data:

[hadoop@hadoop01 ~]$ cat in.dat
haha lalala
hehe heiheihei
heiheihei lololo
lololo haha
haha haha
hehe lololo


Create the corresponding directory on HDFS:
[hadoop@hadoop01 ~]$ hadoop fs -mkdir -p /wc/inputdata


Upload the in.dat text file to that HDFS directory:

[hadoop@hadoop01 ~]$ hadoop fs -put in.dat /wc/inputdata/
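The upload can be verified with a directory listing:

hadoop fs -ls /wc/inputdata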


Package the Java program above into a jar (wc.jar here), upload it to the server, and submit it through Hadoop:

hadoop jar wc.jar cn.hadoop.mr.WCRunner

[hadoop@hadoop01 ~]$ hadoop jar wc.jar  cn.hadoop.mr.WCRunner
16/07/25 15:25:05 INFO client.RMProxy: Connecting to ResourceManager at hadoop01/192.168.56.200:8032
16/07/25 15:25:06 WARN mapreduce.JobSubmitter: Hadoop command-line option parsing not performed. Implement the Tool interface and execute your application with ToolRunner to remedy this.
16/07/25 15:25:06 INFO input.FileInputFormat: Total input paths to process : 1
16/07/25 15:25:06 INFO mapreduce.JobSubmitter: number of splits:1
16/07/25 15:25:07 INFO mapreduce.JobSubmitter: Submitting tokens for job: job_1469431467769_0001
16/07/25 15:25:07 INFO impl.YarnClientImpl: Submitted application application_1469431467769_0001
16/07/25 15:25:07 INFO mapreduce.Job: The url to track the job: http://hadoop01:8088/proxy/application_1469431467769_0001/
16/07/25 15:25:07 INFO mapreduce.Job: Running job: job_1469431467769_0001
16/07/25 15:25:16 INFO mapreduce.Job: Job job_1469431467769_0001 running in uber mode : false
16/07/25 15:25:16 INFO mapreduce.Job:  map 0% reduce 0%
16/07/25 15:25:23 INFO mapreduce.Job:  map 100% reduce 0%
16/07/25 15:25:30 INFO mapreduce.Job:  map 100% reduce 100%
16/07/25 15:25:31 INFO mapreduce.Job: Job job_1469431467769_0001 completed successfully
16/07/25 15:25:31 INFO mapreduce.Job: Counters: 49
	File System Counters
		FILE: Number of bytes read=204
		FILE: Number of bytes written=211397
		FILE: Number of read operations=0
		FILE: Number of large read operations=0
		FILE: Number of write operations=0
		HDFS: Number of bytes read=183
		HDFS: Number of bytes written=44
		HDFS: Number of read operations=6
		HDFS: Number of large read operations=0
		HDFS: Number of write operations=2
	Job Counters 
		Launched map tasks=1
		Launched reduce tasks=1
		Data-local map tasks=1
		Total time spent by all maps in occupied slots (ms)=4219
		Total time spent by all reduces in occupied slots (ms)=4519
		Total time spent by all map tasks (ms)=4219
		Total time spent by all reduce tasks (ms)=4519
		Total vcore-seconds taken by all map tasks=4219
		Total vcore-seconds taken by all reduce tasks=4519
		Total megabyte-seconds taken by all map tasks=4320256
		Total megabyte-seconds taken by all reduce tasks=4627456
	Map-Reduce Framework
		Map input records=6
		Map output records=12
		Map output bytes=174
		Map output materialized bytes=204
		Input split bytes=105
		Combine input records=0
		Combine output records=0
		Reduce input groups=5
		Reduce shuffle bytes=204
		Reduce input records=12
		Reduce output records=5
		Spilled Records=24
		Shuffled Maps =1
		Failed Shuffles=0
		Merged Map outputs=1
		GC time elapsed (ms)=93
		CPU time spent (ms)=1100
		Physical memory (bytes) snapshot=348495872
		Virtual memory (bytes) snapshot=1864597504
		Total committed heap usage (bytes)=219480064
	Shuffle Errors
		BAD_ID=0
		CONNECTION=0
		IO_ERROR=0
		WRONG_LENGTH=0
		WRONG_MAP=0
		WRONG_REDUCE=0
	File Input Format Counters 
		Bytes Read=78
	File Output Format Counters 
		Bytes Written=44
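With a single reducer, the new MapReduce API writes the result to part-r-00000 under the output directory, so it can be read back with:

hadoop fs -cat /output/part-r-00000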


The output is as follows:

haha    4
hehe    2
heiheihei    2
lalala    1
lololo    3

