Common data types in MapReduce
Text
IntWritable
VIntWritable
DoubleWritable
FloatWritable
ShortWritable
LongWritable
VLongWritable
ByteWritable
BooleanWritable
NullWritable
MapWritable
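All of the above live in org.apache.hadoop.io; each wraps a plain Java value (Text wraps a String) so it can be serialized between the map and reduce phases. A minimal sketch of the wrap/unwrap pattern, assuming only that the Hadoop client jars are on the classpath:

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;

public class WritableTypesDemo {
    public static void main(String[] args) {
        //wrap plain Java values in their Writable counterparts
        Text word = new Text("qianfeng");
        IntWritable count = new IntWritable(4);
        //unwrap them back into plain Java values
        System.out.println(word.toString() + "\t" + count.get());
        //NullWritable is a singleton used when a key or value carries no data
        System.out.println(NullWritable.get());
    }
}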
Custom data types
Top-N
Data:
hello qianfeng hello qianfeng qianfeng is best qianfeng is better
hadoop is good
spark is nice
The top three after counting:
qianfeng 4
is 4
hello 2
Use the IDE's source-generation shortcut (shift + ctrl + s) to add:
the no-arg and parameterized constructors
the getter/setter methods
the hashCode and equals methods
toString
…
I wasn't too sure what this was about at first either:
write writes word first, and the first statement of the second method (readFields) also reads word, so that's "same order"
same field count
same field types
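In other words, write and readFields must be mirror images of each other. A minimal illustration (PairWritable is a hypothetical name, but the real TopNWritable below follows exactly this pattern):

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Writable;

public class PairWritable implements Writable {
    private String word;
    private int counter;

    public void write(DataOutput out) throws IOException {
        out.writeUTF(this.word);    //field 1: word
        out.writeInt(this.counter); //field 2: counter
    }

    public void readFields(DataInput in) throws IOException {
        this.word = in.readUTF();    //read field 1 first, with the matching type...
        this.counter = in.readInt(); //...then field 2, again with the matching type
        //reversing these two reads would make readUTF consume the int's raw bytes
        //and corrupt both fields, hence: same order, same count, same types
    }
}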
vi ./tn
hdfs dfs -put /home/tn /tn
yarn jar /home/wc.jar qf.com.mr.TopNDemo /tn /out/23
Run it,
then check the result:
hdfs dfs -cat /out/23/part-r-00000
The result looks right...
no, wait: the run is right,
but our code has a problem:
qianfeng 4
is 4
when two counts are equal, only one of the words is kept.
That's a problem,
so something has to change.
But change what?
No idea.
Please enlighten me.
Figured out why?
It's because a Set
cannot hold duplicate values!!!!
Specifically:
look at the code pasted at the bottom.
We use a TreeSet to hold the Reducer's final output,
and when a value is inserted,
what the compareTo in our custom TopNWritable class compares is
public int compareTo(TopNWritable o) {
return o.counter - this.counter;//descending order
}
just the count.
Say two words each appear 4 times:
then 4 - 4 = 0,
the TreeSet treats that as a duplicate and refuses to insert the second word.
That's how the situation above happened.
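To see that TreeSet behavior in isolation, here is a tiny standalone demo whose comparator, like ours, returns 0 for two distinct values:

import java.util.TreeSet;

public class TreeSetDupDemo {
    public static void main(String[] args) {
        //comparator that only looks at string length, mimicking a compareTo that only looks at the count
        TreeSet<String> ts = new TreeSet<>((a, b) -> a.length() - b.length());
        System.out.println(ts.add("is")); //true: inserted
        System.out.println(ts.add("qf")); //false: same length, so TreeSet treats it as a duplicate
        System.out.println(ts);           //[is]: the second word was silently dropped
    }
}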
So let's just tweak the data instead, hahaha.
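For the record, a cleaner fix than editing the data would be to break ties inside compareTo, so that two different words with the same count no longer compare as equal. A sketch of that idea (not what the code below actually does):

public int compareTo(TopNWritable o) {
    //compare counts in descending order; Integer.compare also avoids int-overflow issues
    int byCount = Integer.compare(o.counter, this.counter);
    if (byCount != 0) {
        return byCount;
    }
    //tie-break on the word itself, so distinct words are never treated as duplicates
    return this.word.compareTo(o.word);
}

With that version both qianfeng 4 and is 4 would survive. But, as announced, here we just change the data.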
Let's see.
Run it:
yarn jar /home/wc.jar qf.com.mr.TopNDemo /tn /out/34
Check the result:
hdfs dfs -cat /out/34/part-r-00000
No problems now. Done.
…
Data:
hello qianfeng hello qianfeng qianfeng is best qianfeng better
hadoop is good
spark is nice
The top three after counting:
qianfeng 4
is 3
hello 2
- Class notes:
- 1. A custom data type must implement Writable (not sortable) or WritableComparable (sortable)
- 2. Implement the required methods: write, readFields, compareTo
- 3. The fields in write and readFields must match in count and order, and the types must match
- 4. You can also override toString, equals, hashCode, etc.
package qf.com.mr;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.WritableComparable;
/*
*@author Shishuai E-mail:1198319583@qq.com
*@version Create time : May 30, 2019, 5:13:33 PM
*Class notes:
*1. A custom data type must implement Writable (not sortable) or WritableComparable (sortable)
*2. Implement the required methods: write, readFields, compareTo
*3. The fields in write and readFields must match in count and order, and the types must match
*4. You can also override toString, equals, hashCode, etc.
*
*Top-N
*Data:
*hello qianfeng hello qianfeng qianfeng is best qianfeng is better
*hadoop is good
*spark is nice
*The top three after counting:
*qianfeng 4
*is 4
*hello 2
*/
public class TopNWritable implements WritableComparable<TopNWritable>{
public String word;
public int counter;
public TopNWritable() {
}
public TopNWritable(String word, int counter) {
super();
this.word = word;
this.counter = counter;
}
/**
* Serialize: write the fields out
*/
public void write(DataOutput out) throws IOException {
out.writeUTF(this.word);
out.writeInt(this.counter);
}
/**
* Deserialize: read the fields back, in the same order
*/
public void readFields(DataInput in) throws IOException {
this.word = in.readUTF();
this.counter = in.readInt();
}
public int compareTo(TopNWritable o) {
return o.counter - this.counter;//descending order
}
public String getWord() {
return word;
}
public void setWord(String word) {
this.word = word;
}
public int getCounter() {
return counter;
}
public void setCounter(int counter) {
this.counter = counter;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + counter;
result = prime * result + ((word == null) ? 0 : word.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
TopNWritable other = (TopNWritable) obj;
if (counter != other.counter)
return false;
if (word == null) {
if (other.word != null)
return false;
} else if (!word.equals(other.word))
return false;
return true;
}
@Override
public String toString() {
return "[" + word + "\t" + counter + "]";
}
}
- Class notes: Top-N
- i.e., finding the top few
package qf.com.mr;
import java.io.IOException;
import java.util.TreeSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/*
*@author Shishuai E-mail:1198319583@qq.com
*@version Create time : May 28, 2019, 5:42:34 PM
*Class notes: Top-N
*i.e., finding the top few
....
*/
public class TopNDemo implements Tool {
/**
* Map phase
*
* @author HP
*/
public static class MyMapper extends Mapper<LongWritable, Text, Text, Text> {
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
String line = value.toString();
String[] words = line.split(" ");
for (String s : words) {
context.write(new Text(s), new Text(1 + ""));
}
}
}
/**
* Reduce phase
*/
public static class MyReducer extends Reducer<Text, Text, TopNWritable, NullWritable> {
public static final int TOP_N = 3;
//define a sorted set to hold the final output
TreeSet<TopNWritable> ts = new TreeSet<TopNWritable>();
@Override
protected void reduce(Text key, Iterable<Text> values, Context context)
throws IOException, InterruptedException {
int counter = 0;
for (Text t : values) {
counter += Integer.parseInt(t.toString());
}
//build the final output record for this word
TopNWritable tn = new TopNWritable(key.toString(), counter);
//add the tn object to ts
ts.add(tn);
//if ts now holds more than TOP_N elements, remove the last one (largest counts sort first, so the last is the smallest)
if(ts.size() > TOP_N) {
ts.remove(ts.last());
}
//context.write(tn, NullWritable.get());
}
@Override
protected void cleanup(Context context)
throws IOException, InterruptedException {
//write out every element left in ts
for (TopNWritable tn : ts) {
context.write(tn, NullWritable.get());
}
}
}
public void setConf(Configuration conf) {
// set properties on conf (HDFS HA client settings)
conf.set("fs.defaultFS", "hdfs://qf");
conf.set("dfs.nameservices", "qf");
conf.set("dfs.ha.namenodes.qf", "nn1, nn2");
conf.set("dfs.namenode.rpc-address.qf.nn1", "hadoop01:9000");
conf.set("dfs.namenode.rpc-address.qf.nn2", "hadoop02:9000");
conf.set("dfs.client.failover.proxy.provider.qf",
"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
}
public Configuration getConf() {
return new Configuration();
}
public int run(String[] args) throws Exception {
// 1. get the configuration object
Configuration conf = new Configuration();
// 2. apply settings to conf (skip if none are needed)
// 3. get the job object (mind which package Job is imported from)
Job job = Job.getInstance(conf, "job");
// 4. set the job's main class
job.setJarByClass(TopNDemo.class);
// set the input and output paths
setInputAndOutput(job, conf, args);
// System.out.println("jiazai finished");
// 5. configure the map phase
job.setMapperClass(MyMapper.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(Text.class);
// System.out.println("map finished");
// 6. configure the reduce phase
job.setReducerClass(MyReducer.class);
job.setOutputKeyClass(TopNWritable.class);
job.setOutputValueClass(NullWritable.class);
return job.waitForCompletion(true) ? 0 : 1;
}
// main method
public static void main(String[] args) throws Exception {
int isok = ToolRunner.run(new Configuration(), new TopNDemo(), args);
System.out.println(isok);
}
/**
* Handles the input and output path arguments
*
* @param job
* @param conf
* @param args
*/
private void setInputAndOutput(Job job, Configuration conf, String[] args) {
if (args.length != 2) {
System.out.println("usage:yarn jar /*.jar package.classname /* /*");
return;
}
// handle the input and output path arguments normally
try {
FileInputFormat.addInputPath(job, new Path(args[0]));
FileSystem fs = FileSystem.get(conf);
Path outputpath = new Path(args[1]);
if (fs.exists(outputpath)) {
fs.delete(outputpath, true);
}
FileOutputFormat.setOutputPath(job, outputpath);
} catch (Exception e) {
e.printStackTrace();
}
}
}
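One caveat about this pattern: the TreeSet lives inside a single Reducer instance, so the output is only a global Top-N because the job runs with one reduce task (Hadoop's default). With several reducers, each would emit its own top three. If your cluster config raises that default, pin it explicitly in run(); a one-line addition, suggested here rather than part of the original code:

//ensure a single reduce task so the in-reducer TreeSet sees every word
job.setNumReduceTasks(1);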