1. Create input and output directories locally on Linux.
2. Copy everything in the local input directory to the /in directory on HDFS.
First create the /in directory on HDFS:
bin/hadoop fs -mkdir /in
bin/hadoop fs -put ../input/* /in
3. List the files under /in in the Hadoop file system:
bin/hadoop fs -ls /in
Run the built-in wordcount example:
bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.3.jar wordcount /in/ /output/wordcount1
View the results of the run:
hadoop fs -cat /output/wordcount1/*
————————————————————————————————————————————
Writing and running your own MapReduce program, based on Hadoop 2.6.3
Before reduce starts, the map phase must complete. Each time a line of data is read, the Mapper class's map method is called once.
Map input: key is the byte offset where the line starts (Long); value is one line of data (String). Map output: key is a single word (String); value is the count 1 (Long). Naively, the class would be declared as:

public class WordCountMapper extends Mapper<Long, String, String, Long> {

But these plain types cannot be used directly. Inputs and outputs are passed across the network, which involves serialization. String and Long do implement Java's serialization interface and can travel over the wire, but the JDK serialization mechanism is not very efficient under Hadoop. Hadoop therefore reimplements serialization with its own types, and plain String and Long do not fit Hadoop's serialization mechanism: Long becomes LongWritable, String becomes Text.

public class WordCountMapper extends Mapper<LongWritable, Text, Text, LongWritable>
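To make the Writable mechanism concrete, here is a minimal standalone sketch (WritableDemo is a made-up class name, not part of the original program; it only assumes hadoop-common on the classpath) that serializes a Text and a LongWritable the way Hadoop does between map and reduce:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;

public class WritableDemo {
    public static void main(String[] args) throws IOException {
        // Write a Text and a LongWritable using Hadoop's own serialization
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buffer);
        new Text("hello").write(out);
        new LongWritable(1L).write(out);
        // Read them back in the same order
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(buffer.toByteArray()));
        Text word = new Text();
        LongWritable count = new LongWritable();
        word.readFields(in);
        count.readFields(in);
        System.out.println(word + " -> " + count.get()); // prints: hello -> 1
    }
}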
Reduce input: key is a single word (Text); values are the counts of 1 (LongWritable). Reduce output: key is a single word (Text); value is the total count n (LongWritable).
public class WordCountReduce extends Reducer<Text, LongWritable, Text, LongWritable> {
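To see why reduce receives a key together with an iterable of values, here is a plain-Java sketch (no Hadoop needed; the class name and data are made up for illustration) of how the shuffle phase groups the mapper's <word, 1> pairs by key before reduce is called:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class ShuffleSketch {
    public static void main(String[] args) {
        // Words as they come out of WordCountMapper, one <word, 1> pair each
        String[] mapOutput = { "hello", "world", "hello" };
        // The framework groups the 1s by key before calling reduce
        Map<String, List<Long>> grouped = new TreeMap<>();
        for (String word : mapOutput) {
            grouped.computeIfAbsent(word, k -> new ArrayList<>()).add(1L);
        }
        // reduce is then invoked once per key:
        // hello -> [1, 1], world -> [1]
        grouped.forEach((word, ones) -> System.out.println(word + " -> " + ones));
    }
}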
1. Write the program
package jvm.hadoop.starter;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class WordCountMapper extends Mapper<LongWritable, Text, Text, LongWritable> {

    private static final LongWritable one = new LongWritable(1);
    private Text word = new Text();

    @Override
    protected void map(LongWritable key, Text value,
            Mapper<LongWritable, Text, Text, LongWritable>.Context context)
            throws IOException, InterruptedException {
        // Get the content of one line of the file
        String line = value.toString();
        // Split the line into words
        StringTokenizer itr = new StringTokenizer(line, " "); // line data, delimiter
        // Emit <word, 1> for each word
        while (itr.hasMoreTokens()) {
            this.word.set(itr.nextToken());
            context.write(this.word, one); // map output is also in <K, V> form
        }
    }
}
package jvm.hadoop.starter;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

// Input per call: key = "hello", values = {1, 1, 1, ...}
public class WordCountReduce extends Reducer<Text, LongWritable, Text, LongWritable> {

    @Override
    protected void reduce(Text key, Iterable<LongWritable> values,
            Reducer<Text, LongWritable, Text, LongWritable>.Context context)
            throws IOException, InterruptedException {
        // Accumulator for this word's total count
        long count = 0;
        for (LongWritable value : values) {
            count += value.get();
        }
        // Emit the <word, count> pair
        context.write(key, new LongWritable(count));
    }

    // Standalone check of StringTokenizer: every character in the delimiter
    // string is a separate delimiter, so this prints "this", "is.a", "test"
    public static void main(String[] args) {
        StringTokenizer st = new StringTokenizer("this,is.a test", ",| ");
        while (st.hasMoreTokens()) {
            System.out.println(st.nextToken());
        }
    }
}
package jvm.hadoop.starter;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Describes a job (which Mapper class, which Reducer class, where the input
 * files live, where the results go), then submits that job to the Hadoop cluster.
 */
// jvm.hadoop.starter.WordCountRunner
public class WordCountRunner {

    public static void main(String[] args)
            throws IOException, ClassNotFoundException, InterruptedException {
        // Create the Job instance via the static factory method
        Configuration conf = new Configuration();
        Job wordcount = Job.getInstance(conf);
        // The jar holding this job's classes, located via the class containing the main method
        wordcount.setJarByClass(WordCountRunner.class);
        // Which Mapper and Reducer classes the job uses
        wordcount.setMapperClass(WordCountMapper.class);
        wordcount.setReducerClass(WordCountReduce.class);
        // K/V types of the mapper output; when left unset, they default to the
        // same types as the final (reducer) output declared below
        // wordcount.setMapOutputKeyClass(Text.class);
        // wordcount.setMapOutputValueClass(LongWritable.class);
        // K/V types of the reducer output
        wordcount.setOutputKeyClass(Text.class);
        wordcount.setOutputValueClass(LongWritable.class);
        // Where the raw input data lives; these paths are directory-level
        FileInputFormat.setInputPaths(wordcount, "hdfs://master:9000/wc/srcdata");
        // Where the results are written (this directory must not already exist,
        // or the job will fail on submission)
        FileOutputFormat.setOutputPath(wordcount, new Path("hdfs://master:9000/wc/output"));
        boolean res = wordcount.waitForCompletion(true);
        System.exit(res ? 0 : 1);
    }
}
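The paths above are hard-coded, so this jar only ever processes /wc/srcdata. A common variant (a sketch, not part of the original program; WordCountRunnerArgs is a hypothetical class name) reads the input and output directories from the command line instead:

package jvm.hadoop.starter;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

// Hypothetical alternative runner: paths come from args instead of being hard-coded
public class WordCountRunnerArgs {

    public static void main(String[] args) throws Exception {
        Job wordcount = Job.getInstance(new Configuration());
        wordcount.setJarByClass(WordCountRunnerArgs.class);
        wordcount.setMapperClass(WordCountMapper.class);
        wordcount.setReducerClass(WordCountReduce.class);
        wordcount.setOutputKeyClass(Text.class);
        wordcount.setOutputValueClass(LongWritable.class);
        // args[0] = input directory, args[1] = output directory
        FileInputFormat.setInputPaths(wordcount, new Path(args[0]));
        FileOutputFormat.setOutputPath(wordcount, new Path(args[1]));
        System.exit(wordcount.waitForCompletion(true) ? 0 : 1);
    }
}

It would then be run as, for example: hadoop jar wordcount.jar jvm.hadoop.starter.WordCountRunnerArgs /wc/srcdata /wc/output2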
2. Package the program as a jar (e.g. wordcount.jar) and place it on the master node (here, under /opt/workspace).
3. Create the /wc/srcdata directory on HDFS and upload a file to run wordcount on into that directory:
hadoop fs -mkdir -p /wc/srcdata
hadoop fs -put /opt/file.txt /wc/srcdata
4. Run the job
From the /opt/workspace directory, run: hadoop jar wordcount.jar jvm.hadoop.starter.WordCountRunner
Problems encountered along the way:
Running hadoop fs -put reported "No route to host".
The cause was that the firewall on the slave nodes had not been shut down; shutting it down (for example, service iptables stop on CentOS 6) fixed it.