Operating Hadoop from Java
1. Install and configure Hadoop on Windows; see [Install and Configure Hadoop on Windows] for details.
2. Open IDEA and create a Maven project.
3. Fill in the GroupId (package name) and ArtifactId (project name).
4. Select the Maven installation path and the path to Maven's settings.xml file.
5. Once the project is created, create the corresponding Java classes.
Implementing WordCount with MapReduce
Before writing Java code against Hadoop, the required Maven dependencies must be downloaded. Declare them in the pom.xml file and IDEA will download them automatically. MapReduce needs the following dependencies:
<dependencies>
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>4.12</version>
        <scope>test</scope>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-common</artifactId>
        <version>2.6.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId>
        <version>2.6.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-auth</artifactId>
        <version>2.6.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>2.6.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
        <version>2.6.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-mapreduce-client-core</artifactId>
        <version>2.6.0</version>
    </dependency>
</dependencies>
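All the Hadoop artifacts above share the same version, so one optional refinement is to pull the version number into a Maven property; a minimal sketch (the property name hadoop.version is an arbitrary choice, not something the original project uses):

<properties>
    <hadoop.version>2.6.0</hadoop.version>
</properties>

Each <version> tag then becomes <version>${hadoop.version}</version>, so upgrading Hadoop means editing a single line.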
Once the required dependencies have been downloaded, create the Java classes that implement the word count.
1. The WCMapper class
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class WCMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        System.out.println(key); // debug output: the byte offset of the current line
        String line = value.toString();
        String[] words = line.split(" ");
        // emit (word, 1) for every word on the line
        for (String word : words) {
            context.write(new Text(word), new IntWritable(1));
        }
    }
}
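Allocating a new Text and a new IntWritable for every word works, but a common MapReduce idiom is to reuse writable instances across calls, since the framework serializes the key/value at write time. A sketch of that variant of WCMapper (same imports and behavior as above):

public class WCMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    private static final IntWritable ONE = new IntWritable(1);
    private final Text word = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        for (String w : value.toString().split(" ")) {
            word.set(w);
            context.write(word, ONE); // safe to reuse: write() serializes immediately
        }
    }
}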
2. The WCPartitioner class
This class partitions the map output manually, spreading the keys of a large input across several reduce tasks so the job runs more efficiently.
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;

public class WCPartitioner extends Partitioner<Text, IntWritable> {
    @Override
    public int getPartition(Text text, IntWritable intWritable, int i) {
        // map each key to one of i partitions by its hash code
        return Math.abs(text.hashCode() % i);
    }
}
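Note that Hadoop's default HashPartitioner implements essentially the same logic, so this custom class is mainly useful as a template for more specific partitioning schemes. For reference, the built-in version masks the sign bit instead of calling Math.abs:

// org.apache.hadoop.mapreduce.lib.partition.HashPartitioner
public int getPartition(K key, V value, int numReduceTasks) {
    return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
}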
3. The WCReduce class
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class WCReduce extends Reducer<Text, IntWritable, Text, IntWritable> {
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        // sum all the counts emitted for this word
        int total = 0;
        for (IntWritable value : values) {
            total += value.get();
        }
        context.write(key, new IntWritable(total));
    }
}
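Because this reduce logic simply sums, and summing is associative and commutative, the same class can also serve as a combiner, pre-aggregating counts on the map side to cut shuffle traffic. This is an optional one-line addition to the driver, not part of the original code:

job.setCombinerClass(WCReduce.class); // optional: run WCReduce as a map-side combiner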
4. The WCDriver class
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WCDriver {
    public static void main(String[] args) throws Exception {
        // 1. set up the configuration and the job
        Configuration cfg = new Configuration();
        Job job = Job.getInstance(cfg, "wc");
        job.setJarByClass(WCDriver.class);
        // 2. specify the mapper and reducer
        job.setMapperClass(WCMapper.class);
        job.setReducerClass(WCReduce.class);
        // specify the mapper output types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // specify the partitioner (optional)
        // job.setNumReduceTasks(4);
        // job.setPartitionerClass(WCPartitioner.class);
        // specify the reducer output types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // specify the input and output paths
        FileInputFormat.setInputPaths(job, new Path("E:/test/settings.txt"));
        FileOutputFormat.setOutputPath(job, new Path("E:/test/M"));
        // 3. run the job
        boolean result = job.waitForCompletion(true);
        System.out.println(result ? "success" : "failure");
        System.exit(result ? 0 : 1);
    }
}
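One practical caveat: FileOutputFormat refuses to start if the output directory already exists, so rerunning the job against E:/test/M fails until the folder is removed. A common workaround, sketched here as an optional addition before waitForCompletion, is to delete it through the FileSystem API:

// optional: remove a pre-existing output directory so reruns don't fail
org.apache.hadoop.fs.FileSystem fs = org.apache.hadoop.fs.FileSystem.get(cfg);
Path outDir = new Path("E:/test/M");
if (fs.exists(outDir)) {
    fs.delete(outDir, true); // true = delete recursively
}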
If the job runs successfully, an M folder is generated containing the result files.
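The counts land in a file named part-r-00000 inside that folder, alongside an empty _SUCCESS marker. For a hypothetical input containing "hello world hello", the output file would read:

hello   2
world   1

Keys come out sorted and the key/value separator is a tab, the defaults of TextOutputFormat.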
Running the Java Code on HDFS
To run the word count on HDFS:
1. Change the input and output paths in the WCDriver class so they are passed in as command-line arguments.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WCDriver {
    public static void main(String[] args) throws Exception {
        // 1. set up the configuration and the job
        Configuration cfg = new Configuration();
        Job job = Job.getInstance(cfg, "wc");
        job.setJarByClass(WCDriver.class);
        // 2. specify the mapper and reducer
        job.setMapperClass(WCMapper.class);
        job.setReducerClass(WCReduce.class);
        // specify the mapper output types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // specify the reducer output types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // input and output paths come from the command line
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        // 3. run the job
        boolean result = job.waitForCompletion(true);
        System.out.println(result ? "success" : "failure");
        System.exit(result ? 0 : 1);
    }
}
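Since the paths now come from the command line, an optional guard at the top of main fails fast when they are missing (this check is an addition, not part of the original code):

if (args.length < 2) {
    System.err.println("Usage: WCDriver <input path> <output path>");
    System.exit(2);
}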
2. Build the jar.
Package the project as a jar, upload it to the virtual machine, and run the following command there:
hadoop jar test1.jar /test/c.txt /test/Mn
The output files can then be found on HDFS.
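Note that the command above omits the main class name, which only works if the jar's manifest declares WCDriver as its Main-Class; otherwise append the fully qualified class name after test1.jar. Assuming the input file still needs uploading, a typical sequence looks like:

hdfs dfs -put c.txt /test/c.txt       # upload the input file to HDFS
hadoop jar test1.jar /test/c.txt /test/Mn
hdfs dfs -ls /test/Mn                 # shows _SUCCESS plus part-r-* files
hdfs dfs -cat /test/Mn/part-r-00000   # view the word counts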