MapReduce program unable to read text beyond a certain column index

Asked: 2018-03-15 11:02:03

Tags: java hadoop mapreduce

I am new to Hadoop and have written a few small MapReduce programs. I am trying to read a CSV file using the Mapper class. The CSV contains a header row and values in columns indexed up to 20. Strangely, the program runs fine while I read up to index 17, but as soon as I read index 18 I get an ArrayIndexOutOfBoundsException. I cannot understand why it throws the exception even though the 18th index exists in the file.

Here is my code:

package org.apress.prohadoop.c3;

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.log4j.Logger;

public class CSVFileProcessorNewAPI {

    protected static Logger logger = Logger.getLogger(CSVFileProcessorNewAPI.class);

    // Column indices for the fields this job uses; indices 11-16 are
    // intentionally skipped because those columns are not needed here.
    public static class LastFmConstants {

        public static final int match_id = 0;
        public static final int inning_id = 1;
        public static final int batting_team = 2;
        public static final int bowling_team = 3;
        public static final int over = 4;
        public static final int ball = 5;
        public static final int batsman = 6;
        public static final int non_striker = 7;
        public static final int bowler = 8;
        public static final int is_super_over = 9;
        public static final int wide_runs = 10;
        public static final int total_runs_inOver = 17;
        public static final int player_dismissed = 18;
        public static final int dismissal_kind = 19;
    }

    public static class MyMapper extends MapReduceBase 
                                 implements Mapper<LongWritable, Text, Text, IntWritable> {


        public void map(LongWritable key, Text value,
                        OutputCollector<Text, IntWritable> output, 
                        Reporter reporter) throws IOException {

            logger.info("Vibhas Logger Started");

            try {
                if (key.get() == 0 && value.toString().startsWith("match_id")) // skip the header row
                    return;
            } catch (Exception e) {
                e.printStackTrace();
            }

            String[] parts = value.toString().split("[,]");

            String inning_id=parts[LastFmConstants.inning_id];
            String match_id_=parts[LastFmConstants.match_id];
            String batting_team=parts[LastFmConstants.batting_team];
            String bowling_team=parts[LastFmConstants.bowling_team];
            String over=parts[LastFmConstants.over];
            String ball=parts[LastFmConstants.ball];
            String batsman=parts[LastFmConstants.batsman];
            String non_striker=parts[LastFmConstants.non_striker];
            String bowler=parts[LastFmConstants.bowler];
            String wide_runs=parts[LastFmConstants.wide_runs];
            String total_runs_inOver=parts[LastFmConstants.total_runs_inOver];
            String player_Dismissed=parts[LastFmConstants.player_dismissed];
            String dismissal_kind=parts[LastFmConstants.dismissal_kind];

            if (!bowler.isEmpty() && bowler.trim().contains("Chahal") && dismissal_kind.equalsIgnoreCase("S Dhawan")) {
                int runs = Integer.parseInt(total_runs_inOver);
                output.collect(new Text("Match-->" + match_id_), new IntWritable(runs));
            }
        }
    }

    public static class MyReducer extends MapReduceBase 
                                  implements Reducer<Text, IntWritable, Text, IntWritable> {
        public void reduce(Text key, 
                           Iterator<IntWritable> values,
                           OutputCollector<Text, IntWritable> output, 
                           Reporter reporter) throws IOException {
            logger.info("Vibhas Reducer Started");
            int sum = 0;
            while (values.hasNext()) {
                sum += values.next().get();
            }
            output.collect(key, new IntWritable(sum));
        }
    }

    public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf(CSVFileProcessorNewAPI.class);
        conf.setJobName("CSVFileProcessorNewAPI Job");

        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(IntWritable.class);

        conf.setMapperClass(MyMapper.class);
        conf.setCombinerClass(MyReducer.class);
        conf.setReducerClass(MyReducer.class);
        conf.setNumReduceTasks(1);
        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);
        FileInputFormat.setInputPaths(conf, new Path(args[0]));
        FileOutputFormat.setOutputPath(conf, new Path(args[1]));

        JobClient.runJob(conf);
    }
} 

CSV file format:

match_id,inning,batting_team,bowling_team,over,ball,batsman,non_striker,bowler,is_super_over,wide_runs,bye_runs,legbye_runs,noball_runs,penalty_runs,batsman_runs,extra_runs,total_runs,player_dismissed,dismissal_kind,fielder

1,1,Sunrisers Hyderabad,Royal Challengers Bangalore,1,1,DA Warner,S Dhawan,TS Mills,0,0,0,0,0,0,0,0,0,,,

1,1,Sunrisers Hyderabad,Royal Challengers Bangalore,1,2,DA Warner,S Dhawan,TS Mills,0,0,0,0,0,0,0,0,0,,,

Exception:

hadoop jar /home/cloudera/Downloads/pro-apache-hadoop-master/prohadoop.jar org.apress.prohadoop.c3.CSVFileProcessorNewAPI /Input/test.csv /outPutCSV
18/03/15 02:19:19 INFO client.RMProxy: Connecting to ResourceManager at /0.0.0.0:8032
18/03/15 02:19:20 INFO client.RMProxy: Connecting to ResourceManager at /0.0.0.0:8032
18/03/15 02:19:20 WARN mapreduce.JobResourceUploader: Hadoop command-line option parsing not performed. Implement the Tool interface and execute your application with ToolRunner to remedy this.
18/03/15 02:19:20 INFO mapred.FileInputFormat: Total input paths to process : 1
18/03/15 02:19:20 INFO mapreduce.JobSubmitter: number of splits:2
18/03/15 02:19:21 INFO mapreduce.JobSubmitter: Submitting tokens for job: job_1520413460063_0042
18/03/15 02:19:21 INFO impl.YarnClientImpl: Submitted application application_1520413460063_0042
18/03/15 02:19:21 INFO mapreduce.Job: The url to track the job: http://quickstart.cloudera:8088/proxy/application_1520413460063_0042/
18/03/15 02:19:21 INFO mapreduce.Job: Running job: job_1520413460063_0042
18/03/15 02:19:27 INFO mapreduce.Job: Job job_1520413460063_0042 running in uber mode : false
18/03/15 02:19:27 INFO mapreduce.Job:  map 0% reduce 0%
18/03/15 02:19:43 INFO mapreduce.Job:  map 50% reduce 0%
18/03/15 02:19:45 INFO mapreduce.Job: Task Id : attempt_1520413460063_0042_m_000001_0, Status : FAILED
Error: java.lang.ArrayIndexOutOfBoundsException: 18
    at org.apress.prohadoop.c3.CSVFileProcessorNewAPI$MyMapper.map(CSVFileProcessorNewAPI.java:77)
    at org.apress.prohadoop.c3.CSVFileProcessorNewAPI$MyMapper.map(CSVFileProcessorNewAPI.java:1)
    at org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:54)
    at org.apache.hadoop.mapred.MapTask.runOldMapper(MapTask.java:459)
    at org.apache.hadoop.mapred.MapTask.run(MapTask.java:343)
    at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:164)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:415)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1917)
    at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:158)

Please help me resolve this issue.

2 Answers:

Answer 0 (score: 1):

Calling String[] parts = value.toString().split("[,]"); and then assuming that every record you receive has the correct number of columns is dangerous. Especially when processing large volumes of data, the chance of encountering "dirty" records is not negligible, and it only takes one bad row to kill your entire job.

You should add a check:

String[] parts = value.toString().split(",", -1);
if (parts != null && parts.length == 20) {
    //your normal logic  
} else {
    logger.warn("Unparseable record identified: {}", value);
}
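
As a follow-up design note: besides logging, you could also count the bad rows with a Hadoop counter so they show up in the job statistics. A minimal sketch against the old mapred Reporter that the question's map method already receives (the counter group and name here are made up for illustration):

String[] parts = value.toString().split(",", -1);
if (parts.length == 21) {
    // your normal logic here
} else {
    // Make malformed rows visible in the job counters as well as the task log.
    reporter.incrCounter("CSVFileProcessor", "MALFORMED_RECORDS", 1L);
    logger.warn("Unparseable record identified: " + value);
}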

Answer 1 (score: -1):

You should split the String as follows, and then you will be all set:

String[] parts = value.toString().split(",", -1);

Passing -1 as the second argument ensures that empty strings at the end of the line are kept. By default, String.split discards trailing empty strings, so the ",,," at the end of each of your rows is dropped and the resulting array has only 18 elements (indices 0 to 17). That is exactly why accessing index 18 (player_dismissed) throws, even though the column exists in the file.
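
To see the difference concretely, here is a minimal, self-contained sketch (plain Java, no Hadoop required; the class name SplitDemo is only for illustration) run against the question's own sample row:

public class SplitDemo {
    public static void main(String[] args) {
        String row = "1,1,Sunrisers Hyderabad,Royal Challengers Bangalore,1,1,"
                + "DA Warner,S Dhawan,TS Mills,0,0,0,0,0,0,0,0,0,,,";

        // Default limit (0): trailing empty strings are removed.
        System.out.println(row.split(",").length);     // 18 -> index 18 is out of bounds

        // Limit -1: trailing empty strings are preserved.
        System.out.println(row.split(",", -1).length); // 21 -> all columns present
    }
}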