java.lang.NoSuchMethodException: MapReduce.WordCount$MyMapper.<init>()

Today I tested a program, wordcount, on the YARN platform:

package MapReduce;

import java.io.IOException;


import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.lib.partition.HashPartitioner;

public class WordCount
{
     public static String path1 = "hdfs://hadoop11:9000/dirdata"; // input data set on HDFS
     public static String path2 = "hdfs://hadoop11:9000/worddir"; // output directory on HDFS
     public static void main(String[] args) throws Exception
     {
         Configuration conf = new Configuration();
         conf.set("fs.defaultFS", "hdfs://hadoop11:9000");
         FileSystem fileSystem = FileSystem.get(conf); // obtain an HDFS client instance
         if(fileSystem.exists(new Path(path2)))        // remove a stale output directory so the job can rerun
         {
             fileSystem.delete(new Path(path2), true);
         }
         Job job = Job.getInstance(conf);
         job.setJarByClass(WordCount.class);

         FileInputFormat.setInputPaths(job, new Path(path1));
         job.setInputFormatClass(TextInputFormat.class);
         job.setMapperClass(MyMapper.class);
         job.setMapOutputKeyClass(Text.class);
         job.setMapOutputValueClass(LongWritable.class);

         job.setNumReduceTasks(1);
         job.setPartitionerClass(HashPartitioner.class);


         job.setReducerClass(MyReducer.class);
         job.setOutputKeyClass(Text.class);
         job.setOutputValueClass(LongWritable.class);
         job.setOutputFormatClass(TextOutputFormat.class);
         FileOutputFormat.setOutputPath(job, new Path(path2));
         job.waitForCompletion(true);

     }    
     public class MyMapper extends Mapper<LongWritable, Text, Text, LongWritable>
     {
        @Override
        protected void map(LongWritable k1, Text v1, Context context) throws IOException, InterruptedException
        {
            // Each input line is split on tabs; every token is counted once.
            String[] splited = v1.toString().split("\t");
            for (String word : splited)
            {
                context.write(new Text(word), new LongWritable(1L));
            }
        }
     }
     public class MyReducer extends Reducer<Text, LongWritable, Text, LongWritable>
     {
        @Override
        protected void reduce(Text k2, Iterable<LongWritable> v2s, Context context) throws IOException, InterruptedException
        {
            // Sum the 1s emitted by the mapper for this word.
            long sum = 0L;
            for (LongWritable v2 : v2s)
            {
                sum += v2.get();
            }
            context.write(k2, new LongWritable(sum));
        }
     }
}

Result of the run:

Error: java.lang.RuntimeException: java.lang.NoSuchMethodException: MapReduce.WordCount$MyMapper.<init>()
        at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:131)
        at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:722)
        at org.apache.hadoop.mapred.MapTask.run(MapTask.java:340)
        at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:167)
        at java.security.AccessController.doPrivileged(Native Method)
        at javax.security.auth.Subject.doAs(Subject.java:415)
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1556)
        at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:162)
Caused by: java.lang.NoSuchMethodException: MapReduce.WordCount$MyMapper.<init>()
        at java.lang.Class.getConstructor0(Class.java:2800)
        at java.lang.Class.getDeclaredConstructor(Class.java:2043)
        at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:125)
        ... 7 more

It took me a long time to find the cause: I had forgotten to add static in front of the MyMapper and MyReducer classes. Hadoop creates the Mapper and Reducer instances by reflection through a no-argument constructor, but a non-static inner class has no such constructor: the compiler gives it an implicit constructor that takes the enclosing WordCount instance as a hidden parameter, which is exactly why getDeclaredConstructor() fails with NoSuchMethodException. After adding static, the program ran correctly!
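
To make the failure mode concrete, here is a minimal standalone sketch (my own illustration; the class and member names are made up, not from Hadoop) that reproduces the same exception with plain reflection, which is essentially what ReflectionUtils.newInstance does under the hood:

import java.lang.reflect.Constructor;

public class InnerClassDemo
{
    // Non-static inner class: its only constructor is Inner(InnerClassDemo).
    class Inner {}
    // Static nested class: a genuine no-argument constructor exists.
    static class StaticInner {}

    public static void main(String[] args) throws Exception
    {
        // Show the constructor the compiler actually generated for Inner.
        for (Constructor<?> c : Inner.class.getDeclaredConstructors())
        {
            System.out.println(c); // InnerClassDemo$Inner(InnerClassDemo)
        }
        // Succeeds: the static nested class can be built with no arguments.
        StaticInner.class.getDeclaredConstructor().newInstance();
        // Throws java.lang.NoSuchMethodException: InnerClassDemo$Inner.<init>()
        // -- the same failure the YARN task reported for WordCount$MyMapper.
        Inner.class.getDeclaredConstructor().newInstance();
    }
}

Here is the corrected program: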

package MapReduce;

import java.io.IOException;


import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.lib.partition.HashPartitioner;

public class WordCount
{
     public static String path1 = "hdfs://hadoop11:9000/dirdata"; // input data set on HDFS
     public static String path2 = "hdfs://hadoop11:9000/worddir"; // output directory on HDFS
     public static void main(String[] args) throws Exception
     {
         Configuration conf = new Configuration();
         conf.set("fs.defaultFS", "hdfs://hadoop11:9000");
         FileSystem fileSystem = FileSystem.get(conf); // obtain an HDFS client instance
         if(fileSystem.exists(new Path(path2)))        // remove a stale output directory so the job can rerun
         {
             fileSystem.delete(new Path(path2), true);
         }
         Job job = Job.getInstance(conf);
         job.setJarByClass(WordCount.class);

         FileInputFormat.setInputPaths(job, new Path(path1));
         job.setInputFormatClass(TextInputFormat.class);
         job.setMapperClass(MyMapper.class);
         job.setMapOutputKeyClass(Text.class);
         job.setMapOutputValueClass(LongWritable.class);

         job.setNumReduceTasks(1);
         job.setPartitionerClass(HashPartitioner.class);


         job.setReducerClass(MyReducer.class);
         job.setOutputKeyClass(Text.class);
         job.setOutputValueClass(LongWritable.class);
         job.setOutputFormatClass(TextOutputFormat.class);
         FileOutputFormat.setOutputPath(job, new Path(path2));
         job.waitForCompletion(true);

     }    
     public static class MyMapper extends Mapper<LongWritable, Text, Text, LongWritable>
     {
        @Override
        protected void map(LongWritable k1, Text v1, Context context) throws IOException, InterruptedException
        {
            // Each input line is split on tabs; every token is counted once.
            String[] splited = v1.toString().split("\t");
            for (String word : splited)
            {
                context.write(new Text(word), new LongWritable(1L));
            }
        }
     }
     public static class MyReducer extends Reducer<Text, LongWritable, Text, LongWritable>
     {
        @Override
        protected void reduce(Text k2, Iterable<LongWritable> v2s, Context context) throws IOException, InterruptedException
        {
            // Sum the 1s emitted by the mapper for this word.
            long sum = 0L;
            for (LongWritable v2 : v2s)
            {
                sum += v2.get();
            }
            context.write(k2, new LongWritable(sum));
        }
     }
}
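
For completeness: an equivalent fix is to move MyMapper and MyReducer out into top-level classes of their own; all that matters is that Hadoop can reach a no-argument constructor by reflection. The job is then packaged into a jar and submitted in the usual way (the jar name below is just a placeholder for whatever you actually build):

hadoop jar wordcount.jar MapReduce.WordCount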