MapReduce program causing errors

Time: 2014-01-21 07:01:24

Tags: java hadoop mapreduce

I'm attempting a Java MapReduce exercise. When I compile the code with the following command, I get the errors listed below. Please help me. Thanks in advance.

Source code

package cvkumar.hadoopmr;
import java.io.IOException;
import java.util.StrinTokenizer;
import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.conf.*;
 import org.apache.hadoop.io.*;
 import org.apache.hadoop.mapred.*;
 import org.apache.hadoop.util.*;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputForm;
import org.apache.hadoop.mapreduce.lib.output.FileOutFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class Dictionary 
{
    public static class WordMapper extends Mapper <Text, Text, Text, Text>
    {
        private Text word = new Text();
        public void map(Text key, Text value, Context context)
            throws IOException, InterruptedException
        {
            StringTokenizer itr = new StringTokenizer(value.toString(),",");
            while (itr.hasMoreTokens())
            {
                word.set(itr.nextToken());
                context.write(key,word);
            }
        }
    }
    public static class AllTranslationsReducer
        extends Reducer<Text,Text,Text,Text>
    {
            private Text result = new Text();
            public void reduce(Text key, Iterable<Text> values,Context context)
         throws IOException, InterruptedException
            {
                String translations = "";
                for (Text val : values)
                {
                    translations += "|"+val.toString();
                }
                result.set(translations);
                context.write(key, result);
            }
        }

    public static void main(String[] args) throws Exception
    {
            Configuration conf = new Configuration();
            Job job = new Job(conf, "dictionary");
            job.setJarByClass(Dictionary.class);
            job.setMapperClass(WordMapper.class);
            job.setReducerClass(AllTranslationsReducer.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(Text.class);
            job.setInputFormatClass(KeyValueTextInputFormat.class);
            //FileInputFormat.addInputPath(job, new Path("/tmp/hadoop-cscarioni/dfs/name/file"));
            //FileOutputFormat.setOutputPath(job, new Path("output"));
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
            System.exit(job.waitForCompletion(true) ? 0 : 1);
        }
}

Errors

hadoop@hadoop-Vostro1310:~/hadoop-1.2.1$ javac -classpath hadoop-core-1.2.1.jar -d ./Dictionary ./cvkumar/hadoopmr/Dictionary.java 
./cvkumar/hadoopmr/Dictionary.java:3: cannot find symbol
symbol  : class StrinTokenizer
location: package java.util
import java.util.StrinTokenizer;
                ^
./cvkumar/hadoopmr/Dictionary.java:15: cannot find symbol
symbol  : class KeyValueTextInputForm
location: package org.apache.hadoop.mapreduce.lib.input
import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputForm;
                                            ^
./cvkumar/hadoopmr/Dictionary.java:16: cannot find symbol
symbol  : class FileOutFormat
location: package org.apache.hadoop.mapreduce.lib.output
import org.apache.hadoop.mapreduce.lib.output.FileOutFormat;
                                             ^
./cvkumar/hadoopmr/Dictionary.java:27: cannot find symbol
symbol  : class StringTokenizer
location: class cvkumar.hadoopmr.Dictionary.WordMapper
            StringTokenizer itr = new StringTokenizer(value.toString(),",");
            ^
./cvkumar/hadoopmr/Dictionary.java:27: cannot find symbol
symbol  : class StringTokenizer
location: class cvkumar.hadoopmr.Dictionary.WordMapper
            StringTokenizer itr = new StringTokenizer(value.toString(),",");
                                      ^
./cvkumar/hadoopmr/Dictionary.java:61: setInputFormatClass(java.lang.Class<? extends org.apache.hadoop.mapreduce.InputFormat>) in org.apache.hadoop.mapreduce.Job cannot be applied to (java.lang.Class<org.apache.hadoop.mapred.KeyValueTextInputFormat>)
            job.setInputFormatClass(KeyValueTextInputFormat.class);
               ^
./cvkumar/hadoopmr/Dictionary.java:65: setOutputPath(org.apache.hadoop.mapred.JobConf,org.apache.hadoop.fs.Path) in org.apache.hadoop.mapred.FileOutputFormat cannot be applied to (org.apache.hadoop.mapreduce.Job,org.apache.hadoop.fs.Path)
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
                        ^
7 errors

1 Answer:

Answer 0 (Score: 1)

The answers are already there in the Java compiler's output. Change the following lines:

Line 3:

import java.util.StringTokenizer;

Line 15:

import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat;

Line 16:

import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
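
With those three import lines corrected, the remaining errors should clear up as well: the StringTokenizer errors at line 27 come from the typo at line 3, and the errors at lines 61 and 65 occur because, while the new-API imports are broken, the compiler falls back to the old-API KeyValueTextInputFormat and FileOutputFormat supplied by the wildcard import org.apache.hadoop.mapred.*, and those classes are incompatible with org.apache.hadoop.mapreduce.Job. As a minimal illustrative sketch (not part of the original answer), the import block could be trimmed to just the new-API classes the code actually uses, which also drops that wildcard:

import java.io.IOException;
import java.util.StringTokenizer;                                      // was "StrinTokenizer"

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat;  // was "KeyValueTextInputForm"
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;        // was "FileOutFormat"

Removing org.apache.hadoop.mapred.* is optional once the typos are fixed (an explicit single-class import takes precedence over a wildcard import), but it avoids mixing the old and new MapReduce APIs.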

Tip: If you are using an IDE such as Eclipse or NetBeans, Java compilation errors are highlighted for you, along with hints on how to resolve them. If you are not using an IDE, I strongly recommend that you start! Since you are writing MapReduce programs, I suggest Eclipse, for which a Hadoop plugin is available.