我编写了这个Java hadoop程序,它将执行文件的并行索引。该文件是在eclipse中创建的
package org.myorg;
import java.io.*;
import java.util.*;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.*;
public class ParallelIndexation {
public static class Map extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> {
private final static IntWritable zero = new IntWritable(0);
private Text word = new Text();
public void map(LongWritable key, Text value, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
String line = value.toString();
int CountComputers;
//DataInputStream ConfigFile = new DataInputStream( new FileInputStream("countcomputers.txt"));
FileInputStream fstream = new FileInputStream("/usr/countcomputers.txt"); // путь к файлу
DataInputStream in = new DataInputStream(fstream);
BufferedReader br = new BufferedReader(new InputStreamReader(in));
String result = br.readLine(); // читаем как строку
CountComputers = Integer.parseInt(result); // переводим строку в число
//CountComputers=ConfigFile.readInt();
in.close();
fstream.close();
ArrayList<String> paths = new ArrayList<String>();
StringTokenizer tokenizer = new StringTokenizer(line, "\n");
while (tokenizer.hasMoreTokens())
{
paths.add(tokenizer.nextToken());
}
String[] ConcatPaths= new String[CountComputers];
int NumberOfElementConcatPaths=0;
if (paths.size()%CountComputers==0)
{
for (int i=0; i<CountComputers; i++)
{
ConcatPaths[i]=paths.get(NumberOfElementConcatPaths);
NumberOfElementConcatPaths+=paths.size()/CountComputers;
for (int j=1; j<paths.size()/CountComputers; j++)
{
ConcatPaths[i]+="\n"+paths.get(i*paths.size()/CountComputers+j);
}
}
}
else
{
NumberOfElementConcatPaths=0;
for (int i=0; i<paths.size()%CountComputers; i++)
{
ConcatPaths[i]=paths.get(NumberOfElementConcatPaths);
NumberOfElementConcatPaths+=paths.size()/CountComputers+1;
for (int j=1; j<paths.size()/CountComputers+1; j++)
{
ConcatPaths[i]+="\n"+paths.get(i*(paths.size()/CountComputers+1)+j);
}
}
for (int k=paths.size()%CountComputers; k<CountComputers; k++)
{
ConcatPaths[k]=paths.get(NumberOfElementConcatPaths);
NumberOfElementConcatPaths+=paths.size()/CountComputers;
for (int j=1; j<paths.size()/CountComputers; j++)
{
ConcatPaths[k]+="\n"+paths.get((k-paths.size()%CountComputers)*paths.size()/CountComputers+paths.size()%CountComputers*(paths.size()/CountComputers+1)+j);
}
}
}
//CountComputers=ConfigFile.readInt();
for (int i=0; i<ConcatPaths.length; i++)
{
word.set(ConcatPaths[i]);
output.collect(word, zero);
}
}
}
public static class Reduce extends MapReduceBase implements Reducer<Text, IntWritable, Text, LongWritable> {
public native long Traveser(String Path);
public native void Configure(String Path);
public void reduce(Text key, <Iterator>IntWritable value, OutputCollector<Text, LongWritable> output, Reporter reporter) throws IOException {
long count;
String line = key.toString();
ArrayList<String> ProcessedPaths = new ArrayList<String>();
StringTokenizer tokenizer = new StringTokenizer(line, "\n");
while (tokenizer.hasMoreTokens())
{
ProcessedPaths.add(tokenizer.nextToken());
}
Configure("/etc/nsindexer.conf");
for (int i=0; i<ProcessedPaths.size(); i++)
{
count=Traveser(ProcessedPaths.get(i));
}
output.collect(key, new LongWritable(count));
}
static
{
System.loadLibrary("nativelib");
}
}
public static void main(String[] args) throws Exception {
JobConf conf = new JobConf(ParallelIndexation.class);
conf.setJobName("parallelindexation");
conf.setOutputKeyClass(Text.class);
conf.setOutputValueClass(LongWritable.class);
conf.setMapperClass(Map.class);
conf.setCombinerClass(Reduce.class);
conf.setReducerClass(Reduce.class);
conf.setInputFormat(TextInputFormat.class);
conf.setOutputFormat(TextOutputFormat.class);
FileInputFormat.setInputPaths(conf, new Path(args[0]));
FileOutputFormat.setOutputPath(conf, new Path(args[1]));
JobClient.runJob(conf);
}
}
我通过命令编译程序
javac -classpath /export/hadoop-1.0.1/hadoop-core-1.0.1.jar -d folder/classes folder/src/ParallelIndexation.java
root@one:/export/hadoop-1.0.1/folder/classes# jar -cvf ParallelIndexation.jar -C /export/hadoop-1.0.1/folder/classes
然后,对于本机方法,我尝试创建.h文件
root@one:/export/hadoop-1.0.1/folder/classes# javah -classpath /export/hadoop-1.0.1/hadoop-core-1.0.1.jar;/export/hadoop-1.0.1/folder/classes/ParallelIndexation.jar -jni org.myorg.ParallelIndexation
收到以下错误
Error: no classes specified
bash: /export/hadoop-1.0.1/folder/classes/ParallelIndexation.jar: Permission denied
因此在ParallelIndexation.jar上设置权限0777
答案 0（得分：1）
您应该使用':'而不是';'分离类路径时:
javah -classpath /export/hadoop-1.0.1/hadoop-core-1.0.1.jar:/export/hadoop-1.0.1/folder/classes/ParallelIndexation.jar -jni org.myorg.ParallelIndexation
也许你还应该检查一下 jar 包的内部结构。
我尝试执行修复了上述错误的命令,它确实有效。
javac -cp /usr/share/hadoop/hadoop-core-1.0.4.jar org/myorg/ParallelIndexation.java
jar c org/myorg/ParallelIndexation*.class > ParallelIndexation.jar
javah -classpath /usr/share/hadoop/hadoop-core-1.0.4.jar:ParallelIndexation.jar -jni org.myorg.ParallelIndexation
问题在于您发布了命令的版本:
javah -classpath /export/hadoop-1.0.1/hadoop-core-1.0.1.jar;/export/hadoop-1.0.1/folder/classes/ParallelIndexation.jar -jni org.myorg.ParallelIndexation
bash 将';'视为命令分隔符,因此会尝试执行两条命令:
javah -classpath /export/hadoop-1.0.1/hadoop-core-1.0.1.jar
/export/hadoop-1.0.1/folder/classes/ParallelIndexation.jar -jni org.myorg.ParallelIndexation
第一个命令,'javah'没有指定类,第二个命令试图执行'/export/hadoop-1.0.1/folder/classes/ParallelIndexation.jar',但文件不可执行。