package com.delhi;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
public class UppercaseDriver extends Configured implements Tool {

    public int run(String[] args) throws Exception {
        if (args.length != 2) {
            System.out.printf("Two parameters are required - <input dir> <output dir>\n");
            return -1;
        }
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJobName("uppercase");
        job.setJarByClass(UppercaseDriver.class);
        job.setMapperClass(UpperCaseMapper.class);
        job.setReducerClass(UpperCaseReduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        //job.setNumReduceTasks(1);
        boolean success = job.waitForCompletion(true);
        return success ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        int exitcode = ToolRunner.run(new UppercaseDriver(), args);
        System.exit(exitcode);
    }
}
That is the driver program.
Next is the reducer:
package com.delhi;
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
public class UpperCaseReduce extends Reducer<Text, LongWritable, Text, LongWritable> {

    public void reduce(Text key, Iterable<LongWritable> value,
            org.apache.hadoop.mapreduce.Reducer.Context context)
            throws IOException, InterruptedException {
        int sum = 0;
        System.out.println("how +++++++++++++++++" + key);
        for (LongWritable st : value) {
            sum = (int) (sum + st.get());
        }
        System.out.println("how +++++++++++++++++" + key);
        context.write(key, new LongWritable(sum));
    }
}
Next is the mapper:
package com.delhi;
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
public class UpperCaseMapper extends Mapper<Object, Text, Text, LongWritable> {

    @Override
    protected void map(Object key, Text value,
            org.apache.hadoop.mapreduce.Mapper.Context context)
            throws IOException, InterruptedException {
        String line = value.toString();
        String[] arr = line.split(" ");
        System.out.println("hello++++++++++++++++++++++++++++");
        for (String st : arr) {
            //context.write(new Text(st.toUpperCase().trim()), new LongWritable(1));
            context.write(new Text(st), new LongWritable(1));
        }
    }
}
From existing answers to this kind of question I gathered that the job's output key class and output value class should match the reducer, and I believe I have taken care of that part. In my case, adding @Override to reduce does not work. I am using Hadoop 7.2 0.3. I also tried using the trim function. The problem is that no word counting actually happens: the output file just contains "word 1" for every word. I started out with a different problem and ended up here. Please help. Thanks.
Answer 0 (score: 0)
So, if you add the @Override annotation to your reduce method, you get a compile error:

Method does not override method from its superclass

The problem is that your method signature does not match the one declared in Reducer. You have:

public void reduce(Text key, Iterable<LongWritable> value,
        org.apache.hadoop.mapreduce.Reducer.Context context)

If you change it to:

public void reduce(Text key, Iterable<LongWritable> value, Context context)

the error goes away. Since your reduce method was not overriding anything, it was never called; Hadoop ran the identity reduce instead, which is exactly what matches your output (every word written out with a count of 1).
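
For reference, a minimal corrected reducer along those lines could look like the sketch below. This is my own illustration reusing the package and class names from the question, not code taken from the answer:

package com.delhi;

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class UpperCaseReduce extends Reducer<Text, LongWritable, Text, LongWritable> {

    // Here Context resolves to Reducer<Text, LongWritable, Text, LongWritable>.Context,
    // so the signature matches the superclass and @Override compiles.
    @Override
    protected void reduce(Text key, Iterable<LongWritable> values, Context context)
            throws IOException, InterruptedException {
        long sum = 0;
        // Sum all the 1s emitted by the mapper for this word.
        for (LongWritable count : values) {
            sum += count.get();
        }
        context.write(key, new LongWritable(sum));
    }
}

With the signature matching, the framework actually invokes this reduce method instead of the identity reduce, and each word comes out with its total count.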