嗨这是我的第一个带hadoop的程序,我修改了wordcount程序。我无法执行此程序。我改变了地图输出和放大减速器输入到<text text>
。输入文件包含记录email gender age 21
。执行挂起显示Map 100% Reduce 100%
。
//MAPPER
public class WordMapper extends MapReduceBase implements
Mapper<LongWritable, Text, Text, Text> {
@Override
public void map(LongWritable key, Text value,
OutputCollector<Text, Text> output, Reporter reporter)
throws IOException {
String s = value.toString();
String s1;
Matcher m2;
FileSplit filesplit = (FileSplit) reporter.getInputSplit();
String fileName = filesplit.getPath().getName();
Pattern p1 = Pattern.compile("\\s+email\\s+gender\\s+age\\s+(\\S+)$");
m2=p1.matcher(s);
if (m2.find()){
s1 = m2.replaceFirst("omitted");
output.collect(new Text(s1), new Text(fileName));
}
}
}
//REDUCER
public class SumReducer extends MapReduceBase implements
Reducer<Text, Text, Text, IntWritable> {
@Override
public void reduce(Text key, Iterator<Text> values,
OutputCollector<Text, IntWritable> output, Reporter reporter)
throws IOException {
int cliCount = 0;
while (values.hasNext()) {
cliCount += 1;
}
output.collect(key, new IntWritable(cliCount));
}
}
//MAIN
public class WordCount extends Configured implements Tool {
@Override
public int run(String[] args) throws Exception {
if (args.length != 2) {
System.out.printf(
"Usage: %s [generic options] <input dir> <output dir>\n", getClass()
.getSimpleName());
ToolRunner.printGenericCommandUsage(System.out);
return -1;
}
JobConf conf = new JobConf(getConf(), WordCount.class);
conf.setJobName(this.getClass().getName());
FileInputFormat.setInputPaths(conf, new Path(args[0]));
FileOutputFormat.setOutputPath(conf, new Path(args[1]));
conf.setMapperClass(WordMapper.class);
conf.setReducerClass(SumReducer.class);
conf.setMapOutputKeyClass(Text.class);
conf.setMapOutputValueClass(Text.class);
conf.setOutputKeyClass(Text.class);
conf.setOutputValueClass(IntWritable.class);
JobClient.runJob(conf);
return 0;
}
public static void main(String[] args) throws Exception {
int exitCode = ToolRunner.run(new WordCount(), args);
System.exit(exitCode);
}
}
更新:
只有_log文件夹.xml
文件存在
我保持程序正在执行,hadoop将其杀死。
3/06/19 15:02:47 INFO mapred.JobClient: Total committed heap usage (bytes)=258875392
13/06/19 15:02:47 INFO mapred.JobClient: org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter
13/06/19 15:02:47 INFO mapred.JobClient: BYTES_READ=26
13/06/19 15:02:47 INFO mapred.JobClient: Job Failed: NA
Exception in thread "main" java.io.IOException: Job failed!
at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:1322)
at WordCount.run(WordCount.java:41)
at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:70)
at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:84)
at WordCount.main(WordCount.java:46)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:601)
at org.apache.hadoop.util.RunJar.main(RunJar.java:208)
答案 0 :(得分:1)
我在减速机中遇到了问题。由于错误的实现,迭代器没有继续前进。
while (values.hasNext()) {
cliCount += 1;
//values.next(); This was missing. Adding this runs the code perfectly.
}