I have been trying to do a replicated join using the distributed cache, both on a cluster and through the Karmasphere interface. I have pasted my code below. My program cannot find the file in the cache.
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.Hashtable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.KeyValueTextInputFormat;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
// A demonstration of Hadoop's DistributedCache tool
//
public class MapperSideJoinWithDistributedCache extends Configured implements Tool {

    private final static String inputa = "C:/Users/LopezGG/workspace/Second_join/input1_1";

    public static class MapClass extends MapReduceBase implements Mapper<Text, Text, Text, Text> {

        private Hashtable<String, String> joinData = new Hashtable<String, String>();

        @Override
        public void configure(JobConf conf) {
            try {
                Path[] cacheFiles = DistributedCache.getLocalCacheFiles(conf);
                System.out.println("ds" + DistributedCache.getLocalCacheFiles(conf));
                if (cacheFiles != null && cacheFiles.length > 0) {
                    String line;
                    String[] tokens;
                    BufferedReader joinReader = new BufferedReader(new FileReader(cacheFiles[0].toString()));
                    try {
                        while ((line = joinReader.readLine()) != null) {
                            tokens = line.split(",", 2);
                            joinData.put(tokens[0], tokens[1]);
                        }
                    } finally {
                        joinReader.close();
                    }
                } else {
                    System.out.println("joinreader not set");
                }
            } catch (IOException e) {
                System.err.println("Exception reading DistributedCache: " + e);
            }
        }

        public void map(Text key, Text value, OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
            String joinValue = joinData.get(key.toString());
            if (joinValue != null) {
                output.collect(key, new Text(value.toString() + "," + joinValue));
            }
        }
    }

    public int run(String[] args) throws Exception {
        Configuration conf = getConf();
        JobConf job = new JobConf(conf, MapperSideJoinWithDistributedCache.class);
        DistributedCache.addCacheFile(new Path(args[0]).toUri(), job);
        //System.out.println( DistributedCache.addCacheFile(new Path(args[0]).toUri(), conf));
        Path in = new Path(args[1]);
        Path out = new Path(args[2]);
        FileInputFormat.setInputPaths(job, in);
        FileOutputFormat.setOutputPath(job, out);
        job.setJobName("DataJoin with DistributedCache");
        job.setMapperClass(MapClass.class);
        job.setNumReduceTasks(0);
        job.setInputFormat(KeyValueTextInputFormat.class);
        job.setOutputFormat(TextOutputFormat.class);
        job.set("key.value.separator.in.input.line", ",");
        JobClient.runJob(job);
        return 0;
    }

    public static void main(String[] args) throws Exception {
        long time1 = System.currentTimeMillis();
        System.out.println(time1);
        int res = ToolRunner.run(new Configuration(), new MapperSideJoinWithDistributedCache(), args);
        long time2 = System.currentTimeMillis();
        System.out.println(time2);
        System.out.println("millsecs elapsed:" + (time2 - time1));
        System.exit(res);
    }
}
The error I get is:
INFO mapred.MapTask: numReduceTasks: 0
Exception reading DistributedCache: java.io.FileNotFoundException: \tmp\hadoop-LopezGG\mapred\local\archive\-2564469513526622450_-1173562614_1653082827\file\C\Users\LopezGG\workspace\Second_join\input1_1 (The system cannot find the file specified)
ds[Lorg.apache.hadoop.fs.Path;@366a88bb
12/04/24 23:15:01 INFO mapred.Task: Task:attempt_local_0001_m_000000_0 is done. And is in the process of commiting
12/04/24 23:15:01 INFO mapred.LocalJobRunner:
But the task does run to completion. Could someone please help me? I have looked at the other posts and made all the changes they suggest, but it still does not work.
Answer 0 (score: 0)
I have to admit that I never use the DistributedCache class myself (I pass files with the -files option via GenericOptionsParser instead), and I am not sure that DistributedCache copies a local file into HDFS automatically before the job runs.
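For reference, the -files route would look roughly like the sketch below. The jar name and paths are placeholders, and it assumes the driver keeps running through ToolRunner (as your code above already does) so that GenericOptionsParser handles the option; -files ships the file with the job, so the driver no longer needs its own addCacheFile call.

// Hypothetical launch command (jar name and paths are placeholders):
//   hadoop jar second_join.jar MapperSideJoinWithDistributedCache -files /path/to/input1_1 <input_dir> <output_dir>
// Inside configure(JobConf conf), the mapper should still be able to locate the
// task-local copy the same way the question's code does:
Path[] cacheFiles = DistributedCache.getLocalCacheFiles(conf);
if (cacheFiles != null && cacheFiles.length > 0) {
    BufferedReader joinReader = new BufferedReader(new FileReader(cacheFiles[0].toString()));
    // ... parse the join file exactly as in the question ...
}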
Although I cannot find anything in the Hadoop documentation confirming whether it does, the Pro Hadoop book mentions something to that effect.
In your case, copy the file into HDFS first, and when you call DistributedCache.addCacheFile, pass the URI of the file in HDFS; then see whether that works for you.
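A minimal sketch of that suggestion against the run() method above; the /cache/input1_1 destination is just an illustrative HDFS path, not something the question specifies, and it also needs an import of org.apache.hadoop.fs.FileSystem.

// Inside run(), before the job is submitted:
JobConf job = new JobConf(getConf(), MapperSideJoinWithDistributedCache.class);

// 1. Copy the local join file into HDFS first.
FileSystem fs = FileSystem.get(job);
Path localJoinFile = new Path(args[0]);            // local file passed on the command line
Path hdfsJoinFile = new Path("/cache/input1_1");   // hypothetical HDFS destination
fs.copyFromLocalFile(false, true, localJoinFile, hdfsJoinFile);

// 2. Register the HDFS URI (rather than the local Windows path) with the cache.
DistributedCache.addCacheFile(hdfsJoinFile.toUri(), job);

Alternatively, you can push the file up once with hadoop fs -put and then pass the hdfs:// URI as args[0] without changing the driver at all.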