I wrote this Java Hadoop program, which performs parallel indexing of files. The file was created in Eclipse.
package org.myorg;

import java.io.*;
import java.util.*;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.*;

public class ParallelIndexation {
    public static class Map extends MapReduceBase implements
            Mapper<LongWritable, Text, Text, IntWritable> {
        private final static IntWritable zero = new IntWritable(0);
        private Text word = new Text();

        public void map(LongWritable key, Text value,
                OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException {
            String line = value.toString();
            int CountComputers;
            // Read the number of computers from a local config file.
            FileInputStream fstream = new FileInputStream("/usr/countcomputers.txt"); // path to the file
            DataInputStream in = new DataInputStream(fstream);
            BufferedReader br = new BufferedReader(new InputStreamReader(in));
            String result = br.readLine(); // read it as a string
            CountComputers = Integer.parseInt(result); // convert the string to a number
            in.close();
            fstream.close();
            ArrayList<String> paths = new ArrayList<String>();
            StringTokenizer tokenizer = new StringTokenizer(line, "\n");
            while (tokenizer.hasMoreTokens()) {
                paths.add(tokenizer.nextToken());
            }
            // Split the paths into CountComputers consecutive groups, each
            // emitted as one '\n'-joined key.
            String[] ConcatPaths = new String[CountComputers];
            int NumberOfElementConcatPaths = 0;
            if (paths.size() % CountComputers == 0) {
                for (int i = 0; i < CountComputers; i++) {
                    ConcatPaths[i] = paths.get(NumberOfElementConcatPaths);
                    NumberOfElementConcatPaths += paths.size() / CountComputers;
                    for (int j = 1; j < paths.size() / CountComputers; j++) {
                        ConcatPaths[i] += "\n"
                                + paths.get(i * paths.size() / CountComputers + j);
                    }
                }
            } else {
                // The first paths.size() % CountComputers groups take one extra path.
                NumberOfElementConcatPaths = 0;
                for (int i = 0; i < paths.size() % CountComputers; i++) {
                    ConcatPaths[i] = paths.get(NumberOfElementConcatPaths);
                    NumberOfElementConcatPaths += paths.size() / CountComputers + 1;
                    for (int j = 1; j < paths.size() / CountComputers + 1; j++) {
                        ConcatPaths[i] += "\n"
                                + paths.get(i * (paths.size() / CountComputers + 1) + j);
                    }
                }
                for (int k = paths.size() % CountComputers; k < CountComputers; k++) {
                    ConcatPaths[k] = paths.get(NumberOfElementConcatPaths);
                    NumberOfElementConcatPaths += paths.size() / CountComputers;
                    for (int j = 1; j < paths.size() / CountComputers; j++) {
                        ConcatPaths[k] += "\n"
                                + paths.get((k - paths.size() % CountComputers)
                                        * paths.size() / CountComputers
                                        + paths.size() % CountComputers
                                        * (paths.size() / CountComputers + 1) + j);
                    }
                }
            }
            for (int i = 0; i < ConcatPaths.length; i++) {
                word.set(ConcatPaths[i]);
                output.collect(word, zero);
            }
        }
    }

    public static class Reduce extends MapReduceBase implements
            Reducer<Text, IntWritable, Text, LongWritable> {
        public native long Traveser(String Path);
        public native void Configure(String Path);

        public void reduce(Text key, Iterator<IntWritable> values,
                OutputCollector<Text, LongWritable> output, Reporter reporter)
                throws IOException {
            long count = 0;
            String line = key.toString();
            ArrayList<String> ProcessedPaths = new ArrayList<String>();
            StringTokenizer tokenizer = new StringTokenizer(line, "\n");
            while (tokenizer.hasMoreTokens()) {
                ProcessedPaths.add(tokenizer.nextToken());
            }
            Configure("/etc/nsindexer.conf");
            for (int i = 0; i < ProcessedPaths.size(); i++) {
                count = Traveser(ProcessedPaths.get(i));
            }
            output.collect(key, new LongWritable(count));
        }

        static {
            System.loadLibrary("nativelib");
        }
    }

    public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf(ParallelIndexation.class);
        conf.setJobName("parallelindexation");
        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(LongWritable.class);
        conf.setMapperClass(Map.class);
        conf.setCombinerClass(Reduce.class);
        conf.setReducerClass(Reduce.class);
        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);
        FileInputFormat.setInputPaths(conf, new Path(args[0]));
        FileOutputFormat.setOutputPath(conf, new Path(args[1]));
        JobClient.runJob(conf);
    }
}
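The nested loops in map() implement a block distribution: the paths are split into CountComputers consecutive groups, the first paths.size() % CountComputers groups taking one extra path, and each group is emitted as a single '\n'-joined key. For reference, a minimal standalone restatement of that distribution (hypothetical class and method names, not part of the job) that can be used to check the expected groups on small inputs:

import java.util.*;

// Simplified sketch of the grouping map() computes: n paths over c groups,
// consecutive blocks, with the first n % c groups taking one extra path.
public class GroupingSketch {
    static String[] group(List<String> paths, int c) {
        String[] out = new String[c];
        int idx = 0;
        for (int i = 0; i < c; i++) {
            // Group size: n / c, plus one for the first n % c groups.
            int size = paths.size() / c + (i < paths.size() % c ? 1 : 0);
            StringBuilder sb = new StringBuilder();
            for (int j = 0; j < size; j++) {
                if (j > 0) sb.append('\n');
                sb.append(paths.get(idx++));
            }
            out[i] = sb.toString();
        }
        return out;
    }

    public static void main(String[] args) {
        // Five paths over two groups: sizes 3 and 2, as in the uneven branch above.
        for (String g : group(Arrays.asList("/a", "/b", "/c", "/d", "/e"), 2)) {
            System.out.println(g.replace("\n", "\\n")); // /a\n/b\n/c then /d\n/e
        }
    }
}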
As a result of running it, I received the following errors:
12/10/25 18:51:13 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
12/10/25 18:51:13 WARN mapred.JobClient: Use GenericOptionsParser for parsing the arguments. Applications should implement Tool for the same.
12/10/25 18:51:13 INFO mapred.FileInputFormat: Total input paths to process : 1
12/10/25 18:51:14 INFO mapred.JobClient: Running job: job_local_0001
12/10/25 18:51:14 INFO mapred.Task: Using ResourceCalculatorPlugin : null
12/10/25 18:51:14 INFO mapred.MapTask: numReduceTasks: 1
12/10/25 18:51:14 INFO mapred.MapTask: io.sort.mb = 100
12/10/25 18:51:14 INFO mapred.MapTask: data buffer = 79691776/99614720
12/10/25 18:51:14 INFO mapred.MapTask: record buffer = 262144/327680
12/10/25 18:51:14 WARN mapred.LocalJobRunner: job_local_0001
java.lang.UnsatisfiedLinkError: no nativelib in java.library.path
at java.lang.ClassLoader.loadLibrary(ClassLoader.java:1860)
at java.lang.Runtime.loadLibrary0(Runtime.java:845)
at java.lang.System.loadLibrary(System.java:1084)
at org.myorg.ParallelIndexation$Reduce.<clinit>(ParallelIndexation.java:105)
at java.lang.Class.forName0(Native Method)
at java.lang.Class.forName(Class.java:264)
at org.apache.hadoop.conf.Configuration.getClassByName(Configuration.java:820)
at org.apache.hadoop.conf.Configuration.getClass(Configuration.java:865)
at org.apache.hadoop.conf.Configuration.getClass(Configuration.java:891)
at org.apache.hadoop.mapred.JobConf.getCombinerClass(JobConf.java:1028)
at org.apache.hadoop.mapred.Task$CombinerRunner.create(Task.java:1380)
at org.apache.hadoop.mapred.MapTask$MapOutputBuffer.<init>(MapTask.java:981)
at org.apache.hadoop.mapred.MapTask.runOldMapper(MapTask.java:428)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:372)
at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:212)
12/10/25 18:51:15 INFO mapred.JobClient: map 0% reduce 0%
12/10/25 18:51:15 INFO mapred.JobClient: Job complete: job_local_0001
12/10/25 18:51:15 INFO mapred.JobClient: Counters: 0
12/10/25 18:51:15 INFO mapred.JobClient: Job Failed: NA
Exception in thread "main" java.io.IOException: Job failed!
at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:1265)
at org.myorg.ParallelIndexation.main(ParallelIndexation.java:121)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:601)
at org.apache.hadoop.util.RunJar.main(RunJar.java:156)
I set
LD_LIBRARY_PATH=`pwd`
export LD_LIBRARY_PATH
and libnativelib.so is located in the current directory from which the Hadoop program is executed.
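For what it's worth, here is a minimal standalone check (hypothetical class name, assuming libnativelib.so sits in the current directory) that exercises the same lookup as the static initializer in Reduce. System.loadLibrary consults java.library.path, so printing it shows which directories the JVM actually searches:

// Reproduces the lookup System.loadLibrary("nativelib") performs in Reduce.
public class LoadCheck {
    public static void main(String[] args) {
        // Directories the JVM searches for libnativelib.so; on Linux this is
        // derived from LD_LIBRARY_PATH of the launching process.
        System.out.println(System.getProperty("java.library.path"));
        System.loadLibrary("nativelib"); // throws UnsatisfiedLinkError if not found
        System.out.println("nativelib loaded");
    }
}

Compiled and run as java -Djava.library.path=. LoadCheck from the directory containing libnativelib.so, it shows whether the library itself resolves outside of Hadoop.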