How to fix NoSuchMethodError: org.apache.hadoop.mapred.InputSplit.write

Date: 2017-12-31 13:54:48

Tags: java hadoop mapreduce nosuchmethoderror

I wrote a project on Hadoop. I have a 1-D string array named "words". I want to send it to the reducer, but I get this error:

Exception in thread "main" java.lang.NoSuchMethodError: org.apache.hadoop.mapred.InputSplit.write(Ljava/io/DataOutput;)V

What should I do? Can anyone help me?

Here is my mapper:

public abstract class Mapn implements Mapper<LongWritable, Text, Text, Text> {

    @SuppressWarnings("unchecked")
    public void map(LongWritable key, Text value, Context con)
            throws IOException, InterruptedException {
        String line = value.toString();
        String[] words = line.split(",");
        for (String word : words) {
            Text outputKey = new Text(word.toUpperCase().trim());
            con.write(outputKey, words);
        }
    }
}

1 Answer:

Answer 0 (score: 0)

When I was learning the Hadoop MapReduce framework, I wrote my own program instead of the usual WordCount example and then exported a jar for it. So now I am sharing a program I wrote against the hadoop-1.2.1 jar dependency. It converts numbers into their written-out word form, and it has processed 4 lakh (400,000) numbers without a single error.

So here is the program:

package com.whodesire.count;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

import com.whodesire.numstats.AmtInWords;

public class CountInWords {

    public static class NumberTokenizerMapper 
                    extends Mapper <Object, Text, LongWritable, Text> {

        private static final Text theOne = new Text("1");
        private LongWritable longWord = new LongWritable();

        public void map(Object key, Text value, Context context) {

            try{
                StringTokenizer itr = new StringTokenizer(value.toString());
                while (itr.hasMoreTokens()) {
                    longWord.set(Long.parseLong(itr.nextToken()));
                    context.write(longWord, theOne);
                }
            }catch(ClassCastException cce){
                System.out.println("ClassCastException raiseddd...");
                System.exit(0);
            }catch(IOException | InterruptedException ioe){
                ioe.printStackTrace();
                System.out.println("IOException | InterruptedException raiseddd...");
                System.exit(0);
            }
        }
    }

    public static class ModeReducerCumInWordsCounter 
            extends Reducer <LongWritable, Text, LongWritable, Text>{
        private Text result = new Text();

        //User-defined reduce function, invoked once for each unique key
        public void reduce(LongWritable key, Iterable<Text> values, 
                Context context) throws IOException, InterruptedException {

            /*** The key is a LongWritable; pass it to the
                 AmtInWords constructor as a String ***/
            AmtInWords aiw = new AmtInWords(key.toString());
            result.set(aiw.getInWords());

            //Finally the number and its in-words form are written to the output
            context.write(key, result);
        }
    }

    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {

        /****
         *** All random numbers inside the input files were
         *** generated using https://andrew.hedges.name/experiments/random/
         ****/

        //Load the configuration files and add them to the conf object
        Configuration conf = new Configuration();       

        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();

        Job job = new Job(conf, "CountInWords");

        //Specify the jar which contains the required classes for the job to run.
        job.setJarByClass(CountInWords.class);

        job.setMapperClass(NumberTokenizerMapper.class);
        job.setCombinerClass(ModeReducerCumInWordsCounter.class);
        job.setReducerClass(ModeReducerCumInWordsCounter.class);

        //Set the map output key and value classes for the job
        job.setMapOutputKeyClass(LongWritable.class);
        job.setMapOutputValueClass(Text.class);

        //Set the input and output paths for the job
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));

        //Write the results to a single output file
        job.setNumReduceTasks(1);

        //Submit the job and wait for it to complete
        System.exit(job.waitForCompletion(true) ? 0 : 1);       
    }
}
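
For reference, a typical way to run such an exported job jar looks like the following; the jar name and HDFS paths here are illustrative, not from the original post:

    hadoop jar countinwords.jar com.whodesire.count.CountInWords /user/hduser/numbers/input /user/hduser/numbers/output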

I suggest you review the Hadoop jars you have added, especially hadoop-core-x.x.x.jar, because after looking at your error it seems you have not added some of the mapreduce jars to your project.
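
One more thing worth checking: the mapper in the question mixes the two Hadoop APIs. It implements the old org.apache.hadoop.mapred.Mapper interface while taking the new API's Context parameter, and compiling against one API but running against jars from the other is a classic source of NoSuchMethodError on classes such as org.apache.hadoop.mapred.InputSplit. As a minimal sketch (not necessarily the asker's exact intent, since a raw String[] is not a Writable and cannot be emitted directly), the mapper written consistently against the new org.apache.hadoop.mapreduce API could look like this:

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class Mapn extends Mapper<LongWritable, Text, Text, Text> {

    @Override
    public void map(LongWritable key, Text value, Context con)
            throws IOException, InterruptedException {
        String line = value.toString();
        String[] words = line.split(",");
        for (String word : words) {
            Text outputKey = new Text(word.toUpperCase().trim());
            // A String[] is not a Writable, so emit the whole
            // comma-separated line as a single Text value instead.
            con.write(outputKey, new Text(line));
        }
    }
}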