Big Data with Hadoop 05: Classic MapReduce Examples

1. Aggregate the traffic in the following log data by phone number

[Image: sample traffic log data]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
/**
 * Mapper class: for each line, sum the traffic of the phone number
 * and emit the result to the shuffle buffer
 */
class FlowMap extends Mapper<LongWritable, Text, Text, LongWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context) {
        String line = value.toString();
        try { // wrap the parsing in try/catch so dirty records do not break the job
            // split on whitespace
            String[] split = line.split("\\s+");
            // the phone number
            String s = split[1];
            // the upstream traffic
            long l1 = Long.parseLong(split[split.length - 2]);
            // the downstream traffic
            long l2 = Long.parseLong(split[split.length - 3]);
            // emit the phone number as the key and the summed traffic as the value
            context.write(new Text(s), new LongWritable(l1 + l2));
        } catch (Exception e) {
            System.out.println(line);
        }
    }
}

/**
 * Reducer class: aggregate the traffic sent over by the Mapper for each phone number
 */
class FlowReduce extends Reducer<Text,LongWritable,Text,LongWritable> {
    @Override
    protected void reduce(Text key, Iterable<LongWritable> values, Context context) throws IOException, InterruptedException {
        long flows = 0L;
        for (LongWritable value : values) {
            flows+=value.get();
        }
        context.write(key,new LongWritable(flows));
    }
}
/**
 * Driver class
 */
public class Flow {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "l");
        job.setMapperClass(FlowMap.class);
        job.setReducerClass(FlowReduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);
        FileInputFormat.setInputPaths(job,new Path("D:\\txt\\mrdata\\flow\\input"));
        FileOutputFormat.setOutputPath(job,new Path("D:\\txt\\mrdata\\flow\\output6"));
        job.waitForCompletion(true);
    }
}
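
Because the summation in FlowReduce is commutative and associative, the same class could optionally be registered as a combiner in the driver to cut down shuffle traffic. This is a sketch of an optional tweak, not part of the original code:

        job.setCombinerClass(FlowReduce.class);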

2. Process the following data and compute the peak value across the intervals

[Image: sample interval data, one pair of numbers per line]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;

public class Line {
    /**
     * Mapper class: split the two numbers on ",", loop over every number from the first
     * to the second (inclusive), and emit each number as the key with a count of 1
     */
    static class LineMap extends Mapper<LongWritable, Text,Text, IntWritable> {
        Text k = new Text();
        IntWritable v = new IntWritable(1);
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            try {
                String s = value.toString();
                String[] split = s.split(",");
                int i = Integer.parseInt(split[0]);
                int i2 = Integer.parseInt(split[1]);

                for (int j = i; j <=i2 ; j++) {
                    k.set(j+"");
                    context.write(k,v);
                }
            } catch (Exception e) {
                System.out.println(value.toString());
            }
        }
    }
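
    To make the expansion concrete, a tiny worked example with hypothetical input: the line 2,5 makes the mapper emit (2,1) (3,1) (4,1) (5,1), and a second line 4,7 emits (4,1) through (7,1). After the shuffle, LineReduce counts two 1s for the keys 4 and 5 and a single 1 for the rest, so the largest count in the output, 2, is the peak.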

    /**
     * Aggregate the data sent over by the Mapper to get how many times each number appears
     */
    static class LineReduce extends Reducer<Text, IntWritable,Text, IntWritable> {
        IntWritable v = new IntWritable();
        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
            int count=0;
            for (IntWritable value : values) {
                count++;
            }
            v.set(count);
            context.write(key,v);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "l");
        job.setMapperClass(LineMap.class);
        job.setReducerClass(LineReduce.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.setInputPaths(job,new Path("D:\\txt\\mrdata\\line\\input\\"));
        FileOutputFormat.setOutputPath(job,new Path("D:\\txt\\mrdata\\line\\output13\\"));
        job.waitForCompletion(true);
    }
}

3. Movie rating examples

[Image: sample movie rating data, one JSON record per line]

import com.alibaba.fastjson.JSON;
import org.apache.commons.beanutils.BeanUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.*;
/**
 * MovieBean implements the Writable interface and overrides write and readFields
 * to define Hadoop's serialization format
 */
public class MovieBean implements Writable {
    private String movie;
    private double rate;
    private String timeStamp;
    private String uid;

    @Override
    public String toString() {
        return "MovieBean{" +
                "movie='" + movie + '\'' +
                ", rate=" + rate +
                ", timeStamp='" + timeStamp + '\'' +
                ", uid='" + uid + '\'' +
                '}';
    }

    public String getMovie() {
        return movie;
    }

    public void setMovie(String movie) {
        this.movie = movie;
    }

    public double getRate() {
        return rate;
    }

    public void setRate(double rate) {
        this.rate = rate;
    }

    public String getTimeStamp() {
        return timeStamp;
    }

    public void setTimeStamp(String timeStamp) {
        this.timeStamp = timeStamp;
    }

    public String getUid() {
        return uid;
    }

    public void setUid(String uid) {
        this.uid = uid;
    }

    public MovieBean() {
    }

    public MovieBean(String movie, double rate, String timeStamp, String uid) {
        this.movie = movie;
        this.rate = rate;
        this.timeStamp = timeStamp;
        this.uid = uid;
    }

    @Override // write: serialize the object's fields following Hadoop's serialization contract
    public void write(DataOutput dataOutput) throws IOException {
        dataOutput.writeUTF(movie);
        dataOutput.writeDouble(rate);
        dataOutput.writeUTF(timeStamp);
        dataOutput.writeUTF(uid);
    }

    @Override // readFields: read the fields back in the same order they were written
    public void readFields(DataInput dataInput) throws IOException {
        this.movie=dataInput.readUTF();
        this.rate=dataInput.readDouble();
        this.timeStamp=dataInput.readUTF();
        this.uid=dataInput.readUTF();
    }
}
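
Each input line is a JSON object that fastjson's JSON.parseObject maps onto MovieBean by field name. A line is therefore expected to look roughly like the following (the field names come from the bean; the values are invented for illustration):

{"movie":"1193","rate":4.5,"timeStamp":"978300760","uid":"1"}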

(1) Compute the average rating of each movie

     /**
     * MovieMapper emits the movie name as the key and the MovieBean as the value
     */
class MovieMapper extends Mapper<LongWritable,Text,Text,MovieBean>{
        Text k =new Text();
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

            try {  // the data may contain dirty records, so wrap the parsing in try/catch
                String s = value.toString();
                // each line is JSON, so it can be converted directly into a MovieBean object,
                // which is then emitted as the value
                MovieBean mb = JSON.parseObject(s, MovieBean.class);
                k.set(mb.getMovie());
                context.write(k,mb);
            } catch (Exception e) {
                System.out.println(value.toString());
            }
        }
    }
    /**
     * MovieRateAvgReduce sums the ratings, divides by the number of ratings,
     * and emits the movie name as the key with the average rating as the value
     */
    class MovieRateAvgReduce extends Reducer<Text,MovieBean,Text, DoubleWritable> {
        DoubleWritable v = new DoubleWritable();
        @Override
        protected void reduce(Text key, Iterable<MovieBean> values, Context context) throws IOException, InterruptedException {
            int count = 0;
            double num = 0;
            for (MovieBean value : values) {
                double rate = value.getRate();
                num+=rate;
                count++;
            }
            double avg = num/count;
            v.set(avg);
            context.write(key,v);
        }
    }
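
    The post shows no driver for this average-rating job. Below is a minimal sketch, modeled on the count-top-N driver further down, assuming MovieMapper and MovieRateAvgReduce are top-level classes in the same package; the driver class name and the output directory are placeholders:

    public class MovieAvgDriver {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            Job job = Job.getInstance(conf, "movie-rate-avg");
            job.setMapperClass(MovieMapper.class);
            job.setReducerClass(MovieRateAvgReduce.class);
            // the map output (Text, MovieBean) differs from the final output (Text, DoubleWritable),
            // so both pairs of types are declared
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(MovieBean.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(DoubleWritable.class);
            FileInputFormat.setInputPaths(job, new Path("D:\\txt\\mrdata\\movie\\input"));      // input path used later in the post
            FileOutputFormat.setOutputPath(job, new Path("D:\\txt\\mrdata\\movie\\outputAvg")); // placeholder output dir
            job.waitForCompletion(true);
        }
    }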

(2) For each movie, output the top few highest-rated records

 /**
     * MovieMapper emits the movie name as the key and the MovieBean as the value
     */
class MovieMapper extends Mapper<LongWritable,Text,Text,MovieBean>{
        Text k =new Text();
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

            try {  // the data may contain dirty records, so wrap the parsing in try/catch
                String s = value.toString();
                // each line is JSON, so it can be converted directly into a MovieBean object,
                // which is then emitted as the value
                MovieBean mb = JSON.parseObject(s, MovieBean.class);
                k.set(mb.getMovie());
                context.write(k,mb);
            } catch (Exception e) {
                System.out.println(value.toString());
            }
        }
    }
	 /**
     * MovieRateTopNReduce finds the highest-rated records of each movie.
     * Idea: to take the top few records, the ratings of each movie must be sorted first,
     * so the values are collected into a List and then sorted
     */
    class MovieRateTopNReduce extends Reducer<Text,MovieBean,MovieBean, NullWritable>{
        @Override
        protected void reduce(Text key, Iterable<MovieBean> values, Context context)  {
            try {
                List<MovieBean> movieBeans = new ArrayList<>();
                for (MovieBean mb : values) {
                    // Hadoop reuses a single object in the values iterator, so a new MovieBean must be
                    // created inside the loop and the field values copied into it; otherwise the list would
                    // hold the same object over and over, carrying the values of the last record
                    MovieBean movieBean = new MovieBean();
                    // copy the properties: the first argument is the target, the second is the source
                    BeanUtils.copyProperties(movieBean,mb);
                    // add the copy to the list
                    movieBeans.add(movieBean);
                }
                // sort the list by rating in descending order
                movieBeans.sort(new Comparator<MovieBean>() {
                    @Override
                    public int compare(MovieBean o1, MovieBean o2) {
                        // compare doubles in descending order
                        return Double.compare(o2.getRate(),o1.getRate());
                    }
                });
                // Integer.min(3, movieBeans.size()) takes the smaller of the two values, so at most
                // the top three records are emitted (fewer if the movie has fewer ratings)
                for (int i = 0; i < Integer.min(3,movieBeans.size()); i++) {
                    context.write(movieBeans.get(i),NullWritable.get());
                }
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }
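
    The copy through BeanUtils.copyProperties is essential because Hadoop reuses one value object while iterating. A naive version, shown here only to illustrate the pitfall, would end up with a list of N references to that single reused object, all carrying the last record's values:

        for (MovieBean mb : values) {
            movieBeans.add(mb); // WRONG: every list element points at the same reused object
        }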

(3) Find the n movies with the most rating records and output their counts

    /**
     * Mapper class: emit the movie name as the key and 1 as the value
     */
    static class MovieRateCountTopNMapper extends Mapper<LongWritable,Text,Text,IntWritable>{
        Text k = new Text();
        IntWritable v = new IntWritable(1);
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            try {
                String s = value.toString();
                MovieBean movieBean = JSON.parseObject(s, MovieBean.class);
                k.set(movieBean.getMovie());
                context.write(k,v);
            } catch (Exception e) {
                System.out.println(value.toString());
            }
        }
    }

    /**
     * Reducer class: aggregate the number of ratings for each movie and store it in a Map
     */
    static class MovieRateCountTopNReduce extends Reducer<Text,IntWritable,Text,IntWritable>{
        Map<String,Integer> m = new HashMap<String,Integer>();
        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context)  {
            int count = 0;
            for (IntWritable value : values) {
                count++;
            }
            m.put(key.toString(),count);
        }

        /**
         * cleanup runs exactly once, after all the reduce() calls have finished.
         * It sorts the map built above by review count in descending order and then
         * writes out each key and value.
         * @param context
         * @throws IOException
         * @throws InterruptedException
         */
        @Override
        protected void cleanup(Context context) throws IOException, InterruptedException {
            Text k = new Text();
            IntWritable v = new IntWritable();
            Set<Map.Entry<String, Integer>> entries = m.entrySet();
            List<Map.Entry<String, Integer>> l = new ArrayList<>(entries);
            l.sort(new Comparator<Map.Entry<String, Integer>>() {
                @Override
                public int compare(Map.Entry<String, Integer> o1, Map.Entry<String, Integer> o2) {
                    return o2.getValue()-o1.getValue();
                }
            });
            for (Map.Entry<String, Integer> ll : l) {
                k.set(ll.getKey());
                v.set(ll.getValue());
                context.write(k,v);
            }
        }
    }
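
    Note that cleanup, as written, emits every movie in descending order of review count rather than only the top n. If only the first n entries are wanted, the output loop can be capped in the same style as the Integer.min trick used earlier; a sketch, where n is a hypothetical constant:

            int n = 3;
            for (int i = 0; i < Integer.min(n, l.size()); i++) {
                k.set(l.get(i).getKey());
                v.set(l.get(i).getValue());
                context.write(k, v);
            }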

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        Job job = Job.getInstance(conf, "m");
        job.setMapperClass(MovieRateCountTopNMapper.class);
        job.setReducerClass(MovieRateCountTopNReduce.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        FileInputFormat.setInputPaths(job,new Path("D:\\txt\\mrdata\\movie\\input"));

        FileOutputFormat.setOutputPath(job,new Path("D:\\txt\\mrdata\\movie\\output5"));

        job.waitForCompletion(true);
    }
}

4. Count the words in three files, record which file each word belongs to, sort by count in descending order, and produce a result like the following:

[Image: expected word-count-per-file output, sorted by count]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
import java.util.*;

public class Word {
    /**
     * Mapper class: read the file one line at a time and emit
     * the word concatenated with its file name as the key and 1 as the value
     */
    static class WordCountMapper extends Mapper<LongWritable, Text,Text, IntWritable> {
        String fileName ;
        Text k = new Text();
        IntWritable v = new IntWritable(1);
        @Override // setup runs exactly once, before the map() calls start
        protected void setup(Context context) throws IOException, InterruptedException {
            // the input split currently being processed
            FileSplit file = (FileSplit)context.getInputSplit();
            // take the file name from the split's path
            fileName = file.getPath().getName();
        }

        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            try { // wrap the splitting in try/catch so dirty records do not break the job
                String s = value.toString();
                // split on whitespace to separate the words of the line
                String[] split = s.split("\\s+");
                // each array element is a single word
                for (String s1 : split) {
                    // concatenate the word with its file name to form the key
                    String fileWord = s1+"-"+fileName;
                    k.set(fileWord);
                    context.write(k,v);
                }
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }
    /**
     * Reducer class: aggregate the partitioned and sorted data and write it out
     */
    static class WordCountReduce extends Reducer<Text, IntWritable,Text, IntWritable> {

        IntWritable v = new IntWritable();
        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
                int count = 0;
            for (IntWritable value : values) {
                // add up the 1s from the iterator
                count++;
            }
            v.set(count);
            context.write(key,v);
        }
    }
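
    The main method at the bottom of this class only runs the second pass, so the first pass needs its own driver to be run beforehand. A minimal sketch of such a driver method that could sit inside the Word class; the method name and the input directory are placeholders, while the output directory matches the input of the second job below:

    public static void runFirstPass() throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "word-count-pass1");
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReduce.class);
        // map and final output types match, so one pair of declarations is enough
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.setInputPaths(job, new Path("D:\\txt\\mrdata\\index\\input"));    // assumed input dir with the three files
        FileOutputFormat.setOutputPath(job, new Path("D:\\txt\\mrdata\\index\\output3")); // read by the second pass
        job.waitForCompletion(true);
    }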
    // After this first job the aggregated data looks like the following:

[Image: intermediate output of the first pass (word-filename keys with their counts)]

     /**
     * After the first job, each key is word-filename with its total count, but the same
     * word still appears on several lines (once per file), because the aggregation key
     * was the word plus the file name.
     * So the output of the first job is read again and aggregated a second time,
     * this time with the word alone as the key and "filename count" as the value
     */
    static class WordCountMapper2 extends Mapper<LongWritable,Text,Text,Text>{
        Text k = new Text();
        Text v = new Text();
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            try {
                String s = value.toString();
                //用"-"将单词和文件名切开
                String[] split = s.split("-");
                //以单词为key,文件名和数量为value输出到缓存区
                k.set(split[0]);
                v.set(split[1]);
                context.write(k,v);
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }

    /**
     * Aggregate the partitioned and sorted data from the shuffle
     */
    static class WordCountReduce2 extends Reducer<Text,Text,Text,Text>{

        Text v = new Text();
        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
            Map<String, Integer> map = new HashMap<>();
            // the values grouped under this word are "filename count" strings; put them into a map
            for (Text value : values) {
                String s = value.toString();
                String[] split = s.split("\\s");
                map.put(split[0],Integer.parseInt(split[1]));
            }
            System.out.println(map.size());
            // view the map as a set of entries
            Set<Map.Entry<String, Integer>> entries = map.entrySet();
            // copy the entry set into a list so it can be sorted
            ArrayList<Map.Entry<String, Integer>> list = new ArrayList<>(entries);
            // sort the entries by count in descending order
            list.sort(new Comparator<Map.Entry<String, Integer>>() {
                @Override
                public int compare(Map.Entry<String, Integer> o1, Map.Entry<String, Integer> o2) {
                    return o2.getValue()-o1.getValue();
                }
            });
            // use a StringBuilder to join the sorted entries into a single string
            StringBuilder sb = new StringBuilder();
            // append each "filename-count" pair followed by a space
            for (Map.Entry<String, Integer> l : list) {
                sb.append(l.getKey()).append("-").append(l.getValue()).append(" ");
            }
            // trim the trailing space and emit the word with its sorted file list
            v.set(sb.toString().trim());
            context.write(key,v);
        }
    }

    /**
     * Driver class
     * @param args
     * @throws Exception
     */
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "m");
        job.setMapperClass(WordCountMapper2.class);
        job.setReducerClass(WordCountReduce2.class);
        // when the map output types match the final output types, these two lines can be omitted:
        //job.setMapOutputKeyClass(Text.class);
        //job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.setInputPaths(job,new Path("D:\\txt\\mrdata\\index\\output3"));
        FileOutputFormat.setOutputPath(job,new Path("D:\\txt\\mrdata\\index\\output17"));
        job.waitForCompletion(true);
    }
}
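
For reference, each line of the final output then carries the word as the key, followed by its per-file counts sorted in descending order, roughly like this (the counts are hypothetical):

hello	b.txt-3 a.txt-2 c.txt-1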

5. Join example

Join the following two files on uid into a single file, listing the products purchased by each user.
[Image: user data file]
[Image: order data file]
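
The two input images are not reproduced here. Judging from the parsing code below and the sample line in the JoinBeans comment, the files are assumed to look roughly like this (file names and order IDs are illustrative):

user file  : u001,senge,18,male,angelababy    (uid,name,age,gender,friend)
order file : order001,u001                    (oid,uid)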

import org.apache.commons.beanutils.BeanUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

/**
 * Reduce-side join of the user file and the order file on uid
 */
public class Join2 {
    
        static class JoinMapper extends Mapper<LongWritable, Text, Text, JoinBeans> {
            String fileName = null;
            /**
             * Get the name of the file this map task is reading
             *
             * @param context
             * @throws IOException
             * @throws InterruptedException
             */
            @Override
            protected void setup(Context context) throws IOException, InterruptedException {
                FileSplit file = (FileSplit) context.getInputSplit();
                fileName = file.getPath().getName();
            }
            Text k = new Text();
            JoinBeans jb = new JoinBeans();
            @Override
            protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
                String line = value.toString();
                //handle the record according to which file it came from
                if (fileName != null) {
                    if (fileName.startsWith("user")) { // user data
                        String[] split = line.split(",");
                        String uid = split[0];
                        String name = split[1];
                        int age = Integer.parseInt(split[2]);
                        String gender = split[3];
                        String friend = split[4];
                        String fileName = "user";
                        k.set(uid);
                        // note: the oid field is filled with a placeholder for user records
                        jb.set(  "null",uid,name, age, gender, friend, fileName);
                        context.write(k, jb);
                    } else { // order data
                        String[] split = line.split(",");
                        String oid = split[0];
                        String uid = split[1];
                        String fileName = "orders";
                        k.set(uid);
                        jb.set( oid,uid, "null", -1, "null", "null", fileName);
                        context.write(k, jb);
                    }
                } else {
                    return;
                }
                //
            }
        }

        /**
         * The output KEY is the fully joined line
         */

        static class JoinReducer extends Reducer<Text, JoinBeans, Text, NullWritable> {
            /**
             * key: the uid
             * values: the user record and/or the order records for that uid
             *
             * @param key
             * @param values
             * @param context
             * @throws IOException
             * @throws InterruptedException
             */
            Text k = new Text();
            @Override
            protected void reduce(Text key, Iterable<JoinBeans> values, Context context) throws IOException, InterruptedException {
                try {
                    JoinBeans user = new JoinBeans();
                    List<JoinBeans> ordersList = new ArrayList<>();
                    for (JoinBeans value : values) {
                        // decide which file this bean came from
                        String fileName = value.getFilename();
                        if (fileName.equals("user")) { // user data
                            // copy the data into the user object
                            BeanUtils.copyProperties(user, value);
                        } else { // order data: there may be several orders, collect them in a list
                            // copy each order record
                            JoinBeans orders = new JoinBeans();
                            BeanUtils.copyProperties(orders, value);
                            // store all the order records in the list
                            ordersList.add(orders);
                        }
                    }
                    // do the join: iterate the orders and append the user data
                    if (ordersList.size() > 0) {
                        for (JoinBeans joinBeans : ordersList) {
                            String oid = joinBeans.getOid();
                            String res = oid + "," + user;
                            k.set(res);
                            context.write(k, NullWritable.get());
                        }
                    }
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        }

        public static void main(String[] args) throws Exception {
            // 1. configuration object
            Configuration conf = new Configuration();
            // 2. create the job
            Job job = Job.getInstance(conf, "join");
            // 2.1 set the map and reduce task classes
            job.setMapperClass(JoinMapper.class);
            job.setReducerClass(JoinReducer.class);
            // 2.2 set the map and reduce output key/value types
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(JoinBeans.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(NullWritable.class);
            // 2.3 set the number of reducers (default is 1)
            //job.setNumReduceTasks(2);
            // 2.4 set the input and output paths
            FileInputFormat.setInputPaths(job, new Path("D:\\txt\\mrdata\\join\\input\\"));
            FileOutputFormat.setOutputPath(job, new Path("D:\\txt\\mrdata\\join\\output36"));
            // 3. submit the job, wait for it to finish, and check whether it succeeded
            boolean b = job.waitForCompletion(true);
            System.exit(b ? 0 : -1);
        }
    }
import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * @FileName: Confidential documents
 * @Author: JoKer.D
 * @Date: 2020/8/19 10:13
 * @Description:
 * u001,senge,18,male,angelababy
 */
public class JoinBeans implements Writable {
    private String oid;
    private String uid;
    private String name;
    private int age;
    private String gender;
    private String friend;
    private String filename;


    public void set(String oid, String uid, String name, int age, String gender, String friend, String filename) {
        this.oid = oid;
        this.uid = uid;
        this.name = name;
        this.age = age;
        this.gender = gender;
        this.friend = friend;
        this.filename = filename;
    }

    @Override
    public String toString() {
        return uid+","+name+","+age+","+friend;
    }

    public String getOid() {
        return oid;
    }

    public void setOid(String oid) {
        this.oid = oid;
    }

    public String getUid() {
        return uid;
    }

    public void setUid(String uid) {
        this.uid = uid;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public int getAge() {
        return age;
    }

    public void setAge(int age) {
        this.age = age;
    }

    public String getGender() {
        return gender;
    }

    public void setGender(String gender) {
        this.gender = gender;
    }

    public String getFriend() {
        return friend;
    }

    public void setFriend(String friend) {
        this.friend = friend;
    }

    public String getFilename() {
        return filename;
    }

    public void setFilename(String filename) {
        this.filename = filename;
    }

    @Override
    public void write(DataOutput t) throws IOException {
        t.writeUTF(oid);
        t.writeUTF(uid);
        t.writeUTF(name);
        t.writeInt(age);
        t.writeUTF(gender);
        t.writeUTF(friend);
        t.writeUTF(filename);
    }

    @Override
    public void readFields(DataInput t) throws IOException {
        this.oid=t.readUTF();
        this.uid=t.readUTF();
        this.name=t.readUTF();
        this.age=t.readInt();
        this.gender=t.readUTF();
        this.friend=t.readUTF();
        this.filename=t.readUTF();
    }
}
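
Given JoinBeans.toString() and the concatenation in JoinReducer, each joined output line has the shape oid,uid,name,age,friend, for example (illustrative values):

order001,u001,senge,18,angelababy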