MapReduce ArrayList type mismatch

Asked: 2014-07-17 16:16:52

Tags: java hadoop arraylist mapreduce

Hi, I have been working with Hadoop for about a week now and have been experimenting with it.

I have the following input values as CSV:

    PRAVEEN,400201399,Baby,026A1K,12/04/2010
    PRAVEEN,4002013410,TOY,02038L,1/04/2014
    PRAVEEN,2727272727272,abc,03383,03/14/2015
    PRAVEEN,2263637373,cde,7373737,12/24/2012

The map function should pick the second field of the CSV as the key (i.e. 400201399, etc.) and the third and last fields as the VALUE (e.g. TOY and 12/04/2010), and I want to put the values into an ArrayList rather than a Text.

But I get the following error:

    Error: java.io.IOException: Type mismatch in value from map: expected org.apache.hadoop.io.Text, received java.util.ArrayList

The reduce function is also simple: I have to iterate over the list and produce the required result as the final value (in the reduce code below I only pick the date from the list).

Here is my code:

    package com.test.mapreduce;
    import java.io.IOException;
    import java.text.ParseException;
    import java.text.SimpleDateFormat;
    import java.util.ArrayList;
    import java.util.Date;
    import java.util.HashSet;
    import java.util.Iterator;
    import java.util.List;
    import java.util.Set;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.conf.Configured;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.io.ArrayWritable;
    import org.apache.hadoop.mapred.FileInputFormat;
    import org.apache.hadoop.mapred.FileOutputFormat;
    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.KeyValueTextInputFormat;
    import org.apache.hadoop.mapred.MapReduceBase;
    import org.apache.hadoop.mapred.Mapper;
    import org.apache.hadoop.mapred.OutputCollector;
    import org.apache.hadoop.mapred.Reducer;
    import org.apache.hadoop.mapred.Reporter;
    import org.apache.hadoop.mapred.TextInputFormat;
    import org.apache.hadoop.mapred.TextOutputFormat;
    import org.apache.hadoop.util.Tool;
    import org.apache.hadoop.util.ToolRunner;



    public class RetailCustomerProduct extends Configured implements Tool {

        public static class MapClass extends MapReduceBase
                implements Mapper<LongWritable, Text, Text, List<Text>> {

            private Text key1 = new Text();
            private List<Text> productList = new ArrayList<Text>();
            private Text value1 = new Text();
            private Text product = new Text();
            private int noofFields = 5;

            public void map(LongWritable key, Text value,
                            OutputCollector<Text, List<Text>> output,
                            Reporter reporter) throws IOException {

                String line = value.toString().replaceAll("\\s+","");
                String[] split = line.split(",");

                if(split.length!=noofFields){
                    return;
                }
                else {
                    key1.set((split[1]));
                    value1.set(split[4].toString().trim());
                    product.set(split[2].toString().trim());
                    productList.add(value1);
                    productList.add(product);

                    System.out.println(split[4].toString().trim());
                    output.collect(key1, productList);
                }
            }
        }

        public static class Reduce extends MapReduceBase implements Reducer<Text, List<Text>, Text, Text> {

            public void reduce(Text key, Iterator<List<Text>> values, OutputCollector<Text, Text> output, Reporter reporter) throws IOException {

                SimpleDateFormat formatter = new SimpleDateFormat("MM/dd/yyyy");
                Date date = new Date();

                List<String> dateList = new ArrayList<String>();
                List<String> productList = new ArrayList<String>();

                for(Iterator<List<Text>> it = values; it.hasNext();) {
                    // add the values in the arrayList
                    dateList.add(((Text)it.next().get(0)).toString());
                    productList.add(((Text)it.next().get(1)).toString());
                }

                if(dateList.size()==1){

                    try {
                        date = formatter.parse(dateList.get(0).toString());
                    } catch (ParseException e) {
                        e.printStackTrace();
                    }
                }
                else {
                    String str = dateList.get(0).toString();
                    try {

                        date = formatter.parse(dateList.get(0).toString());

                    } catch (ParseException e1) {
                        e1.printStackTrace();
                    }

                    for(int i=0 ; i <dateList.size();++i){
                        try {

                            if((formatter.parse(dateList.get(i).toString())).compareTo(date)>0){
                                date=formatter.parse(dateList.get(i).toString());
                                // getting the max date from the list
                            }
                        }
                        catch (ParseException e) {
                            e.printStackTrace();
                        }
                    }
                }

                Text value = new Text(date.toString());
                output.collect(key, value);
            }
        }

        public int run(String[] args) throws Exception {
            Configuration conf = getConf();

            JobConf job = new JobConf(conf, RetailCustomerProduct.class);

            Path in = new Path(args[0]);
            Path out = new Path(args[1]);
            FileInputFormat.setInputPaths(job, in);
            FileOutputFormat.setOutputPath(job, out);

            job.setJobName("RetailCustomerProduct");
            job.setMapperClass(MapClass.class);
            job.setReducerClass(Reduce.class);

            job.setInputFormat(TextInputFormat.class);
            job.setOutputFormat(TextOutputFormat.class);

            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(Text.class);
            job.set("key.value.separator.in.input.line", ",");

            JobClient.runJob(job);

            return 0;
        }

        public static void main(String[] args) throws Exception {
            int res = ToolRunner.run(new Configuration(), new RetailCustomerProduct(), args);

            System.exit(res);
        }

    }

Is there a different ArrayList implementation in Hadoop?

My map function should take LongWritable as the KEY and Text as the VALUE, and output Text as the KEY and an ArrayList as the VALUE.

My reduce function should accept Text as the KEY and an ArrayList as the VALUE, and then output Text as the KEY and Text as the VALUE.

So which classes have to be set in the driver class? Currently it looks like this:

    job.setInputFormat(TextInputFormat.class);
    job.setOutputFormat(TextOutputFormat.class);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);

Can anyone help with the correct code?

1 Answer:

Answer 0 (score: 0)

I am also new to Hadoop, but I think this is the problem:

    job.setOutputValueClass(Text.class);

This sets the output value type to Text, not List<Text>. I haven't tried outputting a list; instead, I build a tab-delimited string from the list and output it as an instance of Text:

    new Text(split[4].toString().trim() + "\t" + split[2].toString().trim());
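Here is a minimal sketch of that approach, sticking with the old mapred API used in the question. The class bodies below are illustrative, not the original poster's code, and they reuse the imports already present in the question's class: the mapper joins the date and product with a tab into a single Text value, and the reducer splits each value back apart and keeps the latest date.

    // Sketch only: mapper emits "date<TAB>product" as a single Text value.
    public static class MapClass extends MapReduceBase
            implements Mapper<LongWritable, Text, Text, Text> {

        private final Text outKey = new Text();
        private final Text outValue = new Text();

        public void map(LongWritable key, Text value,
                        OutputCollector<Text, Text> output,
                        Reporter reporter) throws IOException {
            String[] split = value.toString().replaceAll("\\s+", "").split(",");
            if (split.length != 5) {
                return;                                   // skip malformed lines
            }
            outKey.set(split[1]);                         // second field: customer/account id
            outValue.set(split[4] + "\t" + split[2]);     // last field (date) + third field (product)
            output.collect(outKey, outValue);
        }
    }

    // Sketch only: reducer splits each tab-delimited value and keeps the latest date.
    public static class Reduce extends MapReduceBase
            implements Reducer<Text, Text, Text, Text> {

        public void reduce(Text key, Iterator<Text> values,
                           OutputCollector<Text, Text> output,
                           Reporter reporter) throws IOException {
            SimpleDateFormat formatter = new SimpleDateFormat("MM/dd/yyyy");
            Date latest = null;
            while (values.hasNext()) {
                String[] parts = values.next().toString().split("\t");
                try {
                    Date d = formatter.parse(parts[0]);   // parts[0] = date, parts[1] = product
                    if (latest == null || d.compareTo(latest) > 0) {
                        latest = d;                       // remember the most recent date
                    }
                } catch (ParseException e) {
                    // ignore unparsable dates in this sketch
                }
            }
            if (latest != null) {
                output.collect(key, new Text(latest.toString()));
            }
        }
    }

With Text on both sides of the shuffle, the existing setOutputKeyClass(Text.class) and setOutputValueClass(Text.class) calls in the driver work unchanged. On the original question about a "different ArrayList implementation": java.util.ArrayList cannot be used as a map output value at all because it does not implement Writable; the closest built-in type is ArrayWritable (already imported in the question), which can be subclassed with Text.class, but then the driver would also need to declare that subclass via job.setMapOutputValueClass(...).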