MapReduce code for pipe-separated, multi-line unstructured data

Asked: 2016-04-16 12:06:45

Tags: java hadoop

I have multi-line, unstructured data delimited by pipes (||), as shown below:

||
nokia,111,55000,uk,20160809,109
100,online,credit,
10%,90%,2015
||
iphone,6,60000,USA,20160809,110,
100,online,credit
||
samsung,s7,60000,USA,20160809,110
100,online,credit
||
..... millions of records ....

The first record spans 3 lines and the next one 2 lines, so the number of lines per record is not constant. Please tell me how to process this data with MapReduce 2 using a custom input split and a custom record reader. Any links and blogs would also be appreciated.

Please point out any mistakes in the following custom RecordReader classes:

public class CustomInputFormat extends FileInputFormat<LongWritable, Text> {
    @Override
    public RecordReader<LongWritable, Text> createRecordReader(InputSplit split, TaskAttemptContext context) {
        return new CustomRecordReader();
    }
}

public class CustomRecordReader extends RecordReader<LongWritable, Text> {
    private long start;
    private long pos;
    private long end;
    private LineReader in;
    private int maxLineLength;
    private LongWritable key = new LongWritable();
    private Text value = new Text();

    private static final Log LOG = LogFactory.getLog(CustomRecordReader.class);

    @Override
    public void initialize(InputSplit genericSplit, TaskAttemptContext context) throws IOException {

        FileSplit split = (FileSplit) genericSplit;
        Configuration job = context.getConfiguration();
        this.maxLineLength = job.getInt("mapred.linerecordreader.maxlength",
                Integer.MAX_VALUE);

        start = split.getStart();
        end = start + split.getLength();
        final Path file = split.getPath();
        FileSystem fs = file.getFileSystem(job);
        FSDataInputStream fileIn = fs.open(split.getPath());
        boolean skipFirstLine = false;

        if (start != 0) {
            // Not the first split: step back one byte and discard the first
            // (possibly partial) line, since it belongs to the previous split.
            skipFirstLine = true;
            --start;
            fileIn.seek(start);
        }
        in = new LineReader(fileIn, job);
        if (skipFirstLine) {
            Text dummy = new Text();
            start += in.readLine(dummy, 0,
                    (int) Math.min((long) Integer.MAX_VALUE, end - start));
        }
        this.pos = start;
    }

    @Override
    public boolean nextKeyValue() throws IOException {
        int newSize = 0;
        while (pos < end) {
            newSize = in.readLine(value);
            if (newSize == 0) break;
            pos += newSize;
            key.set(pos);
            if(value.toString().equals("||"))
                LOG.info("Skipped line of size " + newSize + " at pos "
                + (pos - newSize));
            else
                break;
        }
        if (newSize == 0)
            return false;
        return true;
    }

    @Override
    public LongWritable getCurrentKey() throws IOException,
            InterruptedException {
        return key;
    }

    @Override
    public Text getCurrentValue() throws IOException, InterruptedException {
        return value;
    }

    @Override
    public float getProgress() throws IOException, InterruptedException {
        if (start == end) {
            return 0.0f;
        } else {
            return Math.min(1.0f, (pos - start) / (float) (end - start));
        }
    }

    @Override
    public void close() throws IOException {
        if (in != null) {
            in.close();
        }
    }
}


public class CustomMapper extends Mapper<LongWritable, CustomInputFormat, LongWritable, CustomInputFormat>{
    final static IntWritable one = new IntWritable(1);
    @Override
    protected void map(LongWritable key, CustomInputFormat value, org.apache.hadoop.mapreduce.Mapper.Context context) throws IOException, InterruptedException {
        System.out.println(" *** Key is: "+key+" value is: "+value+" *** ");
        if(null!=value ){
                   context.write(key, value);
        }
    }
}


public class CustomDriver extends Configured {  
    public static void main(String[] args) throws Exception  {      
        if(args.length!=2){
            System.out.println("pls give i/p & o/p direc");
            System.exit(-1);
        }

        Job job = new Job();        
        job.setJarByClass(CustomDriver.class);
        Configuration conf=job.getConfiguration();                      
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        job.setMapperClass(CustomMapper.class);

        job.setInputFormatClass(CustomInputFormat.class);

        job.setMapOutputKeyClass(LongWritable.class);
        job.setMapOutputValueClass(CustomInputFormat.class);        

        System.exit(job.waitForCompletion(true)?0:-1);
    }
}
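For reference, the classes above are written against the new org.apache.hadoop.mapreduce API; roughly these imports would be needed:

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.LineReader;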

I am getting the following error at runtime:

[cloudera@quickstart gous]$ hadoop jar c3.jar com.ownUnstruct.CustomDriver /user/mr/custom /user/mr/out
16/04/18 23:15:01 INFO client.RMProxy: Connecting to ResourceManager at /0.0.0.0:8032
16/04/18 23:15:02 WARN mapreduce.JobSubmitter: Hadoop command-line option parsing not performed. Implement the Tool interface and execute your application with ToolRunner to remedy this.
16/04/18 23:15:02 INFO input.FileInputFormat: Total input paths to process : 1
16/04/18 23:15:02 INFO mapreduce.JobSubmitter: number of splits:1
16/04/18 23:15:02 INFO mapreduce.JobSubmitter: Submitting tokens for job: job_1461045457615_0001
16/04/18 23:15:03 INFO impl.YarnClientImpl: Submitted application application_1461045457615_0001
16/04/18 23:15:03 INFO mapreduce.Job: The url to track the job: http://quickstart.cloudera:8088/proxy/application_1461045457615_0001/
16/04/18 23:15:03 INFO mapreduce.Job: Running job: job_1461045457615_0001
16/04/18 23:15:15 INFO mapreduce.Job: Job job_1461045457615_0001 running in uber mode : false
16/04/18 23:15:15 INFO mapreduce.Job:  map 0% reduce 0%
16/04/18 23:15:22 INFO mapreduce.Job: Task Id : attempt_1461045457615_0001_m_000000_0, Status : FAILED
Error: java.io.IOException: Initialization of all the collectors failed. Error in last collector was :null
    at org.apache.hadoop.mapred.MapTask.createSortingCollector(MapTask.java:414)
    at org.apache.hadoop.mapred.MapTask.access$100(MapTask.java:81)
    at org.apache.hadoop.mapred.MapTask$NewOutputCollector.<init>(MapTask.java:698)
    at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:770)
    at org.apache.hadoop.mapred.MapTask.run(MapTask.java:341)
    at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:163)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:415)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1671)
    at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:158)
Caused by: java.lang.NullPointerException
    at org.apache.hadoop.mapred.MapTask$MapOutputBuffer.init(MapTask.java:1011)
    at org.apache.hadoop.mapred.MapTask.createSortingCollector(MapTask.java:402)
    ... 9 more  

1 Answer:

Answer 0 (score: 0):

Assuming that the pipe symbol is always preceded and followed by a newline character, I don't think a custom input split or a custom record reader is needed. You can code your mapper as follows:

public class MyMapper extends Mapper<LongWritable, Text, Text, Text> {

    private Text textValue;

    @Override
    public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // process the data
    }

    @Override
    public void run(Mapper<LongWritable, Text, Text, Text>.Context context) throws IOException, InterruptedException {
        setup(context);
        textValue = new Text();
        StringBuffer sb = new StringBuffer();
        while (context.nextKeyValue()) {
            String line = context.getCurrentValue().toString();
            if (line.equals("||")) {
                textValue.set(sb.toString());
                if (!("".equals(sb.toString())))
                    map(context.getCurrentKey(), textValue, context);
                sb = new StringBuffer();
            } else {
                sb.append(line);
            }
        }
        cleanup(context);
    }
}

This could get you started with the processing.
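A minimal map-only driver to wire this up might look like the sketch below (MyDriver is an assumed name; it relies on the default TextInputFormat and runs zero reducers):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class MyDriver {
    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "pipe-delimited records");
        job.setJarByClass(MyDriver.class);
        job.setMapperClass(MyMapper.class);
        job.setNumReduceTasks(0);           // map-only: records are written straight out
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}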

Update: Change the code in nextKeyValue() to something like the following and try it out:

@Override
public boolean nextKeyValue() throws IOException {
    int newSize = 0;
    StringBuffer sb = new StringBuffer();
    while (pos < end) {
        newSize = in.readLine(value);
        if (newSize == 0) break;
        pos += newSize;
        key.set(pos);
        if(value.toString().equals("||")){
            LOG.info("Skipped line of size " + newSize + " at pos "
                + (pos - newSize));
            break;
        } else
            sb.append(value.toString());
    }
    value.set(sb.toString());
    if (newSize == 0)
        return false;
    return true;
}
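As a quick sanity check of this delimiter-driven buffering outside Hadoop, the following plain-Java sketch (class and variable names are illustrative only) replays the sample lines through the same loop and prints one concatenated record per || block:

import java.util.Arrays;
import java.util.List;

public class RecordConcatDemo {
    public static void main(String[] args) {
        List<String> lines = Arrays.asList(
                "||",
                "nokia,111,55000,uk,20160809,109",
                "100,online,credit,",
                "10%,90%,2015",
                "||",
                "iphone,6,60000,USA,20160809,110,",
                "100,online,credit",
                "||");
        StringBuffer sb = new StringBuffer();
        for (String line : lines) {
            if (line.equals("||")) {
                if (sb.length() > 0) {
                    System.out.println(sb);   // one logical record per || block
                    sb = new StringBuffer();
                }
            } else {
                sb.append(line);
            }
        }
        if (sb.length() > 0) {
            System.out.println(sb);           // flush a trailing record, if any
        }
    }
}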

Update: Change the mapper to the following:

public class CustomMapper extends Mapper<LongWritable, Text, LongWritable, Text> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        System.out.println(" *** Key is: " + key + " value is: " + value + " *** ");
        if (null != value) {
            context.write(key, value);
        }
    }
}

In the CustomDriver class, change the following line:

job.setMapOutputValueClass(CustomInputFormat.class);
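The intended replacement, to match the Text values the new mapper emits, would presumably be:

job.setMapOutputValueClass(Text.class);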