My source file is:
package hadoop;

import java.util.*;
import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.*;
import javax.lang.model.util.Elements;

public class ProcessUnits
{
   //Mapper class
   public static class E_EMapper extends MapReduceBase implements
         Mapper<LongWritable, /* Input key type */
                Text,         /* Input value type */
                Text,         /* Output key type */
                IntWritable>  /* Output value type */
   {
      //Map function
      public void map(LongWritable key, Text value,
                      OutputCollector<Text, IntWritable> output,
                      Reporter reporter) throws IOException
      {
         String line = value.toString();
         String lasttoken = null;
         StringTokenizer s = new StringTokenizer(line, "\t");
         String year = s.nextToken();

         while (s.hasMoreTokens())
         {
            lasttoken = s.nextToken();
         }

         int avgprice = Integer.parseInt(lasttoken);
         output.collect(new Text(year), new IntWritable(avgprice));
      }
   }

   //Reducer class
   public static class E_EReduce extends MapReduceBase implements
         Reducer<Text, IntWritable, Text, IntWritable>
   {
      //Reduce function
      public void reduce(Text key, Iterator<IntWritable> values,
                         OutputCollector<Text, IntWritable> output,
                         Reporter reporter) throws IOException
      {
         int maxavg = 30;
         int val = Integer.MIN_VALUE;

         while (values.hasNext())
         {
            if ((val = values.next().get()) > maxavg)
            {
               output.collect(key, new IntWritable(val));
            }
         }
      }
   }

   //Main function
   public static void main(String args[]) throws Exception
   {
      JobConf conf = new JobConf(Eleunits.class);

      conf.setJobName("max_eletricityunits");
      conf.setOutputKeyClass(Text.class);
      conf.setOutputValueClass(IntWritable.class);
      conf.setMapperClass(E_EMapper.class);
      conf.setCombinerClass(E_EReduce.class);
      conf.setReducerClass(E_EReduce.class);
      conf.setInputFormat(TextInputFormat.class);
      conf.setOutputFormat(TextOutputFormat.class);

      FileInputFormat.setInputPaths(conf, new Path(args[0]));
      FileOutputFormat.setOutputPath(conf, new Path(args[1]));

      JobClient.runJob(conf);
   }
}
When I compile it with the following command:
javac -classpath /home/javier/entrada/hadoop-core-1.2.1.jar -d /home/javier/units /home/javier/entrada/ProcessUnits.java
I get the following error:
javac -classpath /home/javier/entrada/hadoop-core-1.2.1.jar -d /home/javier/units /home/javier/entrada/ProcessUnits.java
/home/javier/entrada/ProcessUnits.java:72: error: cannot find symbol
JobConf conf = new JobConf(Eleunits.class);
^
symbol: class Eleunits
location: class ProcessUnits
1 error
My Hadoop version is 2.9.2 and my Java version is 1.8.0_191.
When I open the project in Eclipse and inspect it, it cannot find any import for Eleunits.class.
Answer 0 (score: 0)
"My Hadoop version is 2.9.2 and my Java version is 1.8.0_191"
First of all, hadoop-core-1.2.1.jar was built long before Hadoop 2.9.2 was even dreamed of, so you will need a newer JAR.
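For example, rather than pointing at that old JAR, you can compile against the libraries of the installed 2.9.2 distribution. A minimal sketch, assuming the hadoop command from that installation is on your PATH (the source and output paths are the ones from the question):

javac -classpath "$(hadoop classpath)" -d /home/javier/units /home/javier/entrada/ProcessUnits.java

hadoop classpath prints the classpath the cluster itself uses, so the compile-time and runtime library versions stay in sync.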
"When I open the project in Eclipse and inspect it, it cannot find any import for Eleunits.class"
It is not clear why you are not using Eclipse the whole way through! And not using Maven or Gradle to pull in the correct Hadoop library versions scares me... but Eclipse probably is not lying. You have only shown one class, and it is not called Eleunits; short of copying that line from somewhere else, I am not sure how you ended up with that value.
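If the driver really is the single class you posted, the constructor argument presumably just needs to name that class instead. A one-line sketch of the likely fix:

JobConf conf = new JobConf(ProcessUnits.class);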
Also, the main class should extends Configured implements Tool, as you will find in other MapReduce examples; see the sketch below.
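A minimal sketch of that pattern applied to the class from the question; the job setup inside run() is assumed to be the same as in your current main method:

import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class ProcessUnits extends Configured implements Tool {

   @Override
   public int run(String[] args) throws Exception {
      // Build the JobConf from the Configuration that ToolRunner has already
      // populated with any generic options (-D key=value, -files, ...)
      JobConf conf = new JobConf(getConf(), ProcessUnits.class);
      conf.setJobName("max_eletricityunits");
      // ... the mapper/reducer/format setup from the original main() goes here ...
      FileInputFormat.setInputPaths(conf, new Path(args[0]));
      FileOutputFormat.setOutputPath(conf, new Path(args[1]));
      JobClient.runJob(conf);
      return 0;
   }

   public static void main(String[] args) throws Exception {
      // ToolRunner strips the generic options before handing args to run()
      System.exit(ToolRunner.run(new ProcessUnits(), args));
   }
}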