MapReduce to find a string using Java

Time: 2016-04-30 03:09:15

Tags: java windows hadoop mapreduce hortonworks-data-platform

I am trying to search a text file for a particular string and find that string, but after running this code I get a ClassCastException on io.LongWritable:

Error: java.lang.ClassCastException: org.apache.hadoop.io.LongWritable cannot be cast to org.apache.hadoop.io.Text
        at searchaString.SearchDriver$searchMap.map(SearchDriver.java:1)
        at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:146)
        at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:787)
        at org.apache.hadoop.mapred.MapTask.run(MapTask.java:341)
        at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:168)
        at java.security.AccessController.doPrivileged(Native Method)
        at javax.security.auth.Subject.doAs(Subject.java:415)
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1657)
        at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:162)

16/04/30 02:48:17 INFO mapreduce.Job: map 0% reduce 0%
16/04/30 02:48:23 INFO mapreduce.Job: Task Id : attempt_1461630807194_0021_m_000000_2, Status : FAILED
Error: java.lang.ClassCastException: org.apache.hadoop.io.LongWritable cannot be cast to org.apache.hadoop.io.Text

package samples.wordcount;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
//import org.apache.hadoop.util.GenericOptionsParser;
//import org.apache.hadoop.mapred.lib.NLineInputFormat;
import java.io.IOException;
import java.util.Iterator;


public class WordCount {

    public static void main(String[] args) throws Exception {

        @SuppressWarnings("unused")
        JobClient jobC =new JobClient();

        Configuration conf = new Configuration();
        //String args[] = parser.getRemainingArgs();

        Job job = Job.getInstance(conf);
        job.setJobName("WordCount");


        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.setJarByClass(WordCount.class);

        job.setMapperClass(TokenizerMapper.class);
        job.setReducerClass(IntSumReducer.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        //job.setInputFormatClass(TextInputFormat.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        /*String MyWord = args[2];
        TokenizerMapper.find = MyWord;*/

        System.exit(job.waitForCompletion(true) ?  0:1);
    }

    public static class TokenizerMapper extends Mapper<Text, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        //  private Text word = new Text();
        static String find="txt was not created";
        public int i;

        public void map(Text key, Text value,OutputCollector<Text, IntWritable> output,Reporter reporter) throws IOException, InterruptedException
        {
            String cleanLine = value.toString();        

            String[] cleanL =cleanLine.split("home");

            output.collect(new Text(cleanL[1]), one);

        }
    }

    public static class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {



        public void reduce(Text key, Iterator<IntWritable> values, OutputCollector<Text, IntWritable> output,Reporter reporter)
                throws IOException, InterruptedException {

            int sum = 0;

            String wordText="txt was not created";

            while(values.hasNext()) {

                Boolean check = values.toString().contains("txt was not created");

                if(check)
                {
                    String[] cleanL =values.toString().split("\\.");

                    for(String w : cleanL)
                    {
                        if(w.length()>=wordText.length())

                        {
                            String wrd = w.substring(0,wordText.length()); 

                            if(wrd.equals(wordText))
                            {
                                IntWritable value=values.next();
                                sum += value.get();

                            }

                        }
                    }
                }
            }
            output.collect(key,new IntWritable(sum));
        }
    }
}

I am new to MapReduce and do not know how to achieve this.

This is also what my text file looks like:

tab/hdhd/hip/home.slkj.skjdh.dgsyququ/djkdjjd. **txt was not created** I have to search for that particular text.

Kindly reply.

If you share a solution, please briefly explain what I should change in the code.

Thanks.

2 answers:

Answer 0: (score: 0)

You have given the signature of your Mapper class as below:

public static class TokenizerMapper extends Mapper<Text, Text, Text, IntWritable>

The input key taken by the map method is the byte offset of the line. For example, if the following is the content of your file:

Hello World!

The map function will take the byte offset of the first line as the key and "Hello World!" as the value. The byte offset is a kind of Long value.
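Concretely, with the default TextInputFormat, the single line above reaches the mapper as the pair (0, "Hello World!"): the key 0 is the byte offset at which the line starts, and a second line would arrive with key 13 (the length of "Hello World!" plus the newline).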

Change the input key to LongWritable.
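A minimal sketch of that change, keeping the class and field names from the question's code (note that the new org.apache.hadoop.mapreduce API also passes a Context rather than the old OutputCollector/Reporter pair; this replaces the nested TokenizerMapper inside WordCount and is an illustration, not the complete program):

// Additional import needed at the top of WordCount.java:
// import org.apache.hadoop.io.LongWritable;

public static class TokenizerMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    private final static IntWritable one = new IntWritable(1);

    @Override
    public void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // key is the byte offset of the line; value is the line itself.
        String[] cleanL = value.toString().split("home");
        // Guard against lines that do not contain "home" at all.
        if (cleanL.length > 1) {
            context.write(new Text(cleanL[1]), one);
        }
    }
}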

Answer 1: (score: 0)

New Mapper: public class TokenizerMapper extends Mapper<LongWritable, Text, Text, IntWritable>

What you have written is cont.write(new Text(cleanL[1]), one);

"one" is not an IntWritable. Either change your signature as below: public class TokenizerMapper extends Mapper<LongWritable, Text, Text, Text> and write it as follows:

cont.write(new Text(cleanL[1]), new Text("one"));

OR

public class TokenizerMapper extends Mapper<LongWritable, Text, Text, IntWritable> and write it as:

cont.write(new Text(cleanL[1]), new IntWritable(1));
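Whichever option is chosen, the reducer in the question has the same old-API/new-API mismatch: a reduce method that takes Iterator and OutputCollector never overrides Reducer.reduce, so the default implementation runs instead. A minimal sketch of a matching new-API reducer (a plain sum; the question's substring-matching logic is left out here, and all the needed imports are already in WordCount.java):

public static class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    @Override
    public void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        // The new API passes an Iterable<IntWritable>, not an Iterator,
        // and a Context instead of OutputCollector/Reporter.
        int sum = 0;
        for (IntWritable value : values) {
            sum += value.get();
        }
        context.write(key, new IntWritable(sum));
    }
}

With both methods carrying the new-API signatures, they actually override Mapper.map and Reducer.reduce, and the framework's LongWritable keys match the declared input key type, so the cast error goes away. The search for "txt was not created" can then be layered on top, for example by filtering lines in map before writing.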