Prerequisite reading
Word count: implementing word count with MapReduce (with partitioner and Combiner) - CSDN blog
This walkthrough builds on the pom.xml, log4j configuration, Mapper class, and other material from that post; parts that are identical are not repeated here.
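In case the previous post is not at hand: the WordCountMapper that the driver below references is assumed to look roughly like the following sketch (the package name and the whitespace tokenization are assumptions; use whatever Mapper the previous post actually defines).
package com.soft863;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    private final Text outKey = new Text();
    private final IntWritable one = new IntWritable(1);

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Split each input line on whitespace and emit (word, 1) for every token
        for (String token : value.toString().split("\\s+")) {
            if (!token.isEmpty()) {
                outKey.set(token);
                context.write(outKey, one);
            }
        }
    }
}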
1. Create the output table
Create the following table in the MySQL database that the driver below connects to (the wjobs database on hadoop100 in this walkthrough):
CREATE TABLE `tb_wordcount` (
    `f_word` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci DEFAULT NULL,
    `f_count` int(11) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci;
2. Add the MySQL connector dependency to pom.xml
<dependency>
    <groupId>mysql</groupId>
    <artifactId>mysql-connector-java</artifactId>
    <version>8.0.18</version>
</dependency>
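Note that this dependency only makes the MySQL driver available at compile time and when running from the IDE. If the job is submitted to the cluster, the connector jar also has to reach the task classpath, for example by packaging it into the job jar (e.g. with the Maven shade plugin) or by placing it in the cluster's Hadoop classpath; how you do this depends on your environment.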
3. Create the WordCountDBWritable class
package com.soft863.writable;

import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.lib.db.DBWritable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

public class WordCountDBWritable implements DBWritable, Writable {

    private String word;
    private Integer count;

    // No-argument constructor so the framework can create instances by reflection
    public WordCountDBWritable() {
    }

    public WordCountDBWritable(String word, Integer count) {
        this.word = word;
        this.count = count;
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(word);
        out.writeInt(count);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        // Mirror of write(DataOutput): read the fields back in the same order
        word = in.readUTF();
        count = in.readInt();
    }

    @Override
    public void write(PreparedStatement statement) throws SQLException {
        // Parameter order must match the field list passed to DBOutputFormat.setOutput: (f_word, f_count)
        statement.setString(1, word);
        statement.setInt(2, count);
    }

    @Override
    public void readFields(ResultSet resultSet) throws SQLException {
        // Not used: this job only writes to the database, it never reads from it
    }
}
4. Create the WordCountDBReducer class
package com.soft863;

import com.soft863.writable.WordCountDBWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class WordCountDBReducer extends Reducer<Text, IntWritable, WordCountDBWritable, NullWritable> {

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        // Sum the counts for this word and emit one row object per word
        int count = 0;
        for (IntWritable value : values) {
            count += value.get();
        }
        context.write(new WordCountDBWritable(key.toString(), count), NullWritable.get());
    }
}
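Compared with the HDFS version of the job, the reduce output key is now the WordCountDBWritable bean and the value is NullWritable: DBOutputFormat writes only the keys to the database, one row per emitted key, so all of the column data has to be carried in the key object.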
5. Create the WordCountDBDriver class
package com.soft863;

import com.soft863.writable.WordCountDBWritable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.db.DBConfiguration;
import org.apache.hadoop.mapreduce.lib.db.DBOutputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

import java.io.IOException;

public class WordCountDBDriver {

    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://hadoop100:9000");
        // JDBC driver class, connection URL, user name and password of the target MySQL database
        DBConfiguration.configureDB(conf, "com.mysql.cj.jdbc.Driver",
                "jdbc:mysql://hadoop100:3306/wjobs?useUnicode=true&characterEncoding=UTF-8&serverTimezone=UTC",
                "root", "root123");

        Job job = Job.getInstance(conf);
        // Important: tell Hadoop which jar this job lives in
        job.setJarByClass(WordCountDBDriver.class);

        // Mapper and Reducer classes used by this job
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountDBReducer.class);

        // Key/value types of the map output
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        // Key/value types of the final (reduce) output
        job.setOutputKeyClass(WordCountDBWritable.class);
        job.setOutputValueClass(NullWritable.class);

        // Path of the text data to process (first program argument)
        Path sourceFile = new Path(args[0]);
        FileInputFormat.setInputPaths(job, sourceFile);

        // Write the reduce output into the tb_wordcount table instead of HDFS
        job.setOutputFormatClass(DBOutputFormat.class);
        DBOutputFormat.setOutput(job, "tb_wordcount", "f_word", "f_count");

        // Submit the job to the Hadoop cluster and wait for completion
        boolean success = job.waitForCompletion(true);
        System.exit(success ? 0 : 1);
    }
}
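Note that the imports must come from org.apache.hadoop.mapreduce.lib.db (not the old org.apache.hadoop.mapred.lib.db package), since only the new-API DBConfiguration and DBOutputFormat work with Configuration and Job as used above. With this setup, DBOutputFormat builds an insert statement of roughly the form INSERT INTO tb_wordcount (f_word, f_count) VALUES (?, ?); for every record the reducer emits, WordCountDBWritable.write(PreparedStatement) fills the two placeholders, so the field order passed to setOutput must match the order of the setString/setInt calls in that method.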
6. Run the program
Make sure HDFS is running and that there is input data at /data/word.txt.
Add the run argument: the single program argument is the HDFS input path (here /data/word.txt).
Run WordCountDBDriver.
7. Check the results
Query the tb_wordcount table in the database, for example with SELECT * FROM tb_wordcount ORDER BY f_count DESC;