
Time: 2018-03-30 06:36:49

Tags: java hadoop mapreduce

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.Date;
import java.util.HashMap;
import java.text.ParseException;
import java.text.SimpleDateFormat;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;



public class StubMapper extends Mapper<LongWritable, Text, Text, MinMaxCountTuple> {

    private Text outUserId = new Text();
    private MinMaxCountTuple outTuple = new MinMaxCountTuple();

    private final static SimpleDateFormat frmt = 
            new SimpleDateFormat("yyyy-MM--dd'T'HH:mm:ss.SSS");

//  public static HashMap<String, String> getMapFromCSV(String filePath) throws IOException
//  {
//      
//      HashMap<String, String> words = new HashMap<String, String>();
//      
//      /*BufferedReader in = new BufferedReader(new FileReader(filePath));
//
//      String line;
//      //= in.readLine())
//        while ((line = in.readLine()) != null) {
//            String columns[] = line.split(",");
//            if (!words.containsKey(columns[1])) {
//                words.put(columns[1], columns[6]);
//            }
//
//        }
//        
//        return words;
//        
//        */
//
//
//
//      String line=filePath;
//      
//      while(line!=null){
//          
//          String columns[] = line.split(",");
//          if (columns.length>6){
//            if (!words.containsKey(columns[1])) {
//                words.put(columns[1], columns[6]);
//            } 
//          }
//          
//      }
//      return words;
//  }

@Override
  public void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {


//    HashMap<String, String> parsed = getMapFromCSV(value.toString());
      //String columns[] = value.toString().split("\t");

//    String strDate = parsed.get("CheckoutDateTime");

      //String userId = columns[1];
      //String strDate = columns[6];
    // Skip the CSV header row
    if (value.toString().startsWith("BibNumber")) {
        return;
    }

//  String userId = parsed.get("BibNumber");
    String data[] = value.toString().split(",", -1);
    String userId = data[0];
    String dateTime = data[5];

    try {
        Date creationDate = frmt.parse(dateTime);

        outTuple.setMin(creationDate);
        outTuple.setMax(creationDate);
        outTuple.setCount(1);

        outUserId.set(userId);
        context.write(outUserId, outTuple);
    } catch (ParseException e) {
        // Rows whose date cannot be parsed are skipped (nothing is written for them)
        e.printStackTrace();
    }


  }
}




import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.Date;

import org.apache.hadoop.io.Writable;

public class MinMaxCountTuple implements Writable{

    private Date min = new Date();
    private Date max = new Date();
    private long count = 0;

    private final static SimpleDateFormat frmt = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS");

    public Date getMin()
    {
        return min;
    }

    public void setMin(Date min)
    {
        this.min = min;
    }

    public Date getMax()
    {
        return max;
    }

    public void setMax(Date max)
    {
        this.max = max;
    }

    public long getCount()
    {
        return count;
    }

    public void setCount(long count)
    {
        this.count = count;
    }

    @Override
    public void write(DataOutput out) throws IOException {
        // Serialize min and max as epoch milliseconds, followed by the count
        out.writeLong(min.getTime());
        out.writeLong(max.getTime());
        out.writeLong(count);
    }

    public String toString()
    {
        return frmt.format(min) + "\t" + frmt.format(max) + "\t" + count;
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        // Read the fields back in the same order they were written
        min = new Date(in.readLong());
        max = new Date(in.readLong());
        count = in.readLong();
    }

}
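
For context, a reducer for this tuple would typically just merge the per-key tuples, keeping the earliest min, the latest max, and the summed count. The sketch below only illustrates that pattern; the class name MinMaxCountReducer is a placeholder and may differ from the reducer actually used in the job:

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class MinMaxCountReducer extends Reducer<Text, MinMaxCountTuple, Text, MinMaxCountTuple> {

    private MinMaxCountTuple result = new MinMaxCountTuple();

    @Override
    public void reduce(Text key, Iterable<MinMaxCountTuple> values, Context context)
            throws IOException, InterruptedException {

        result.setMin(null);
        result.setMax(null);
        long count = 0;

        for (MinMaxCountTuple val : values) {
            // Keep the earliest min and latest max seen for this key.
            // Holding on to the Date references is safe here because
            // MinMaxCountTuple.readFields() allocates new Date objects each call.
            if (result.getMin() == null || val.getMin().before(result.getMin())) {
                result.setMin(val.getMin());
            }
            if (result.getMax() == null || val.getMax().after(result.getMax())) {
                result.setMax(val.getMax());
            }
            count += val.getCount();
        }

        result.setCount(count);
        context.write(key, result);
    }
}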

The two classes above are the mapper and the MinMaxCountTuple writable; the job finds the minimum and maximum CheckoutDateTime per key. Basically, what I want is to find the dates on which books are mostly checked out, so from each row of the CSV file I emit the userId as the key and the CheckoutDateTime as the value. The job runs without errors, but while the map input counters show the full size of the data, the map output is a 0-size file, which means the mapper never produces any output from the input. I don't know which part is wrong. I have posted a screenshot of my CSV file. Please enlighten me; I would really appreciate it. If you need more information about my code, let me know and I will provide it. Thanks.

18/03/30 01:38:41 INFO mapred.JobClient:     Map input records=3794727
18/03/30 01:38:41 INFO mapred.JobClient:     Map output records=0
18/03/30 01:38:41 INFO mapred.JobClient:     Map output bytes=0
18/03/30 01:38:41 INFO mapred.JobClient:     Input split bytes=416
18/03/30 01:38:41 INFO mapred.JobClient:     Combine input records=0
18/03/30 01:38:41 INFO mapred.JobClient:     Combine output records=0
18/03/30 01:38:41 INFO mapred.JobClient:     Reduce input groups=0
18/03/30 01:38:41 INFO mapred.JobClient:     Reduce shuffle bytes=24
18/03/30 01:38:41 INFO mapred.JobClient:     Reduce input records=0
18/03/30 01:38:41 INFO mapred.JobClient:     Reduce output records=0

[screenshot of the CSV file]

1 answer:

Answer (score: 1):

The Mapper code looks fine. Have you explicitly set the map output key and value classes in the driver?

job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(MinMaxCountTuple.class);

If they are not set in the driver, try adding them.
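
For reference, here is a minimal driver sketch showing where these two lines belong. MinMaxCountDriver and MinMaxCountReducer are placeholder names, so adjust them to your project; on older Hadoop 1.x releases construct the job with new Job(conf, ...) instead of Job.getInstance:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class MinMaxCountDriver {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "checkout min/max/count");

        job.setJarByClass(MinMaxCountDriver.class);
        job.setMapperClass(StubMapper.class);
        job.setReducerClass(MinMaxCountReducer.class); // your reducer class here

        // Declare the map output types explicitly; the custom writable must be
        // known to the framework for map-side serialization.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(MinMaxCountTuple.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(MinMaxCountTuple.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}

If the map output classes are not declared, they default to the job output classes (and ultimately to LongWritable/Text), so a custom writable such as MinMaxCountTuple should always be declared explicitly for the map output.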