(1) Create a hello.txt file in a folder with the following content:
python hello
java python
c++ java
python php
(2) Then write an entry-level MapReduce program. A MapReduce program has three parts: a Mapper that turns each input record into intermediate (key, value) pairs, a Reducer that aggregates the values for each key, and a Driver that configures and submits the job.
The program is built with Maven; the dependencies in pom.xml are as follows.
<dependencies>
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>RELEASE</version>
    </dependency>
    <dependency>
        <groupId>org.apache.logging.log4j</groupId>
        <artifactId>log4j-core</artifactId>
        <version>2.8.2</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-common</artifactId>
        <version>2.7.2</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>2.7.2</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId>
        <version>2.7.2</version>
    </dependency>
</dependencies>
(3) Create a file named "log4j.properties" under the project's src/main/resources directory with the following content:
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n
log4j.appender.logfile=org.apache.log4j.FileAppender
log4j.appender.logfile.File=target/spring.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=%d %p [%c] - %m%n
(4) Write the Mapper program.
package com.sk;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class WcMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    // The input key is a LongWritable offset (how many bytes from the start of
    // the file this line begins at) and the input value is the line itself.
    // Reusing these two fields avoids creating new objects for every record.
    private Text word = new Text();
    private IntWritable one = new IntWritable(1);

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Get the current line
        String line = value.toString();
        // Split the line on spaces
        String[] words = line.split(" ");
        // Emit each word to the framework as a (word, 1) pair
        for (String word : words) {
            this.word.set(word);
            context.write(this.word, this.one);
        }
    }
}
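To see the map step in isolation, here is a minimal standalone sketch (the class name MapLogicDemo is made up for illustration and is not part of the job) that applies the same split-and-emit logic to the first line of hello.txt:

public class MapLogicDemo {
    public static void main(String[] args) {
        String line = "python hello"; // the first line of hello.txt
        // Same logic as WcMapper.map: split on spaces and emit (word, 1)
        for (String word : line.split(" ")) {
            System.out.println("(" + word + ", 1)");
        }
    }
}

Running it prints (python, 1) and (hello, 1), which is exactly what the framework shuffles to the reducers.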
(5) Write the Reducer program.
package com.sk;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class WcReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    // Reused output value, same trick as in the Mapper
    private IntWritable total = new IntWritable();

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        // Sum up the counts for this word
        int sum = 0;
        for (IntWritable value : values) {
            sum += value.get();
        }
        // Wrap the result and write it out
        total.set(sum);
        context.write(key, total);
    }
}
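Between the map and reduce phases the framework sorts and groups the intermediate pairs by key, so each reduce call sees one word together with all of its counts. The following standalone sketch (hypothetical class name ReduceLogicDemo) mimics that aggregation for the key "python", which appears three times in the sample input:

import java.util.Arrays;
import java.util.List;

public class ReduceLogicDemo {
    public static void main(String[] args) {
        // After the shuffle, the key "python" arrives with the values [1, 1, 1]
        List<Integer> values = Arrays.asList(1, 1, 1);
        int sum = 0;
        for (int v : values) {
            sum += v; // same accumulation as WcReducer.reduce
        }
        System.out.println("(python, " + sum + ")"); // prints (python, 3)
    }
}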
(6) Write the Driver class.
package com.sk;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class WcDriver {
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        // 1. Get a Job instance
        Job job = Job.getInstance(new Configuration());
        // 2. Set the jar by class
        job.setJarByClass(WcDriver.class);
        // 3. Set the Mapper and Reducer
        job.setMapperClass(WcMapper.class);
        job.setReducerClass(WcReducer.class);
        // 4. Set the output key/value types of the map and reduce phases
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // 5. Set the input and output paths from the command-line arguments
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        // 6. Submit the job and wait for it to finish
        boolean b = job.waitForCompletion(true);
        System.exit(b ? 0 : 1);
    }
}
When running the job, supply two program arguments, for example in the IDE's run configuration: the path to the input file (or directory) and an output directory that does not exist yet. Hadoop refuses to start the job if the output directory already exists.
This is an entry-level MapReduce program; make sure you understand what each step does.
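For reference, assuming the sample hello.txt above, the part-r-00000 file in the output directory should contain one tab-separated line per word, sorted by key:

c++	1
hello	1
java	2
php	1
python	3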