DistributedCache NullPointerException in Hadoop 2.7.3

Date: 2017-04-24 19:25:51

Tags: java hadoop mapreduce

When I try to use the DistributedCache in my Hadoop program, I get null. Here is my mapper class code:

public class MapJoinDistributedCacheMapper extends Mapper<LongWritable, Text, Text, Text> {

    private static HashMap<String, String> DepartmentMap = new HashMap<String, String>();
    private BufferedReader brReader;
    private String strDeptName = "";
    private Text txtMapOutputKey = new Text("");
    private Text txtMapOutputValue = new Text("");
    Log log = LogFactory.getLog(MapJoinDistributedCacheMapper.class);
    String key = "";
    URI eachPath1;

    enum MYCOUNTER {
        RECORD_COUNT, FILE_EXISTS, FILE_NOT_FOUND, SOME_OTHER_ERROR
    }

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        URI[] cacheFilesLocal = Job.getInstance(context.getConfiguration()).getCacheFiles();
        // URI[] cacheFilesLocal = context.getCacheFiles();
        // Path[] cacheFilesLocal = DistributedCache.getLocalCacheFiles(context.getConfiguration());

        for (URI eachPath : cacheFilesLocal) {
            if (eachPath.equals("depart.txt")) {
                context.getCounter(MYCOUNTER.FILE_EXISTS).increment(1);
                log.info("the length---------------" + eachPath.getPath());
                loadDepartmentsHashMap(eachPath, context);
                eachPath1 = eachPath;
            }
        }
    }

    // startdepartMap
    public void loadDepartmentsHashMap(URI eachPath, Context context) throws IOException {

        String strLineRead = "";

        try {
            brReader = new BufferedReader(new FileReader(eachPath.getPath()));

            // Read each line, split and load to HashMap
            while ((strLineRead = brReader.readLine()) != null) {
                String deptFieldArray[] = strLineRead.split("\t");
                DepartmentMap.put(deptFieldArray[0].trim(), deptFieldArray[1].trim());
                // DepartmentMap.put("002", "hive");
            }
        } catch (FileNotFoundException e) {
            e.printStackTrace();
            context.getCounter(MYCOUNTER.FILE_NOT_FOUND).increment(1);
        } catch (IOException e) {
            context.getCounter(MYCOUNTER.SOME_OTHER_ERROR).increment(1);
            e.printStackTrace();
        } finally {
            if (brReader != null) {
                brReader.close();
            }
        }
    }

    @Override
    public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

        context.getCounter(MYCOUNTER.RECORD_COUNT).increment(1);

        if (value.toString().length() > 0) {

            String arrEmpAttributes[] = value.toString().split("\t");
            int len = arrEmpAttributes.length;
            DepartmentMap.put("002", "hive");

            try {
                strDeptName = DepartmentMap.get(arrEmpAttributes[3].toString());
            } catch (Exception e) {
                e.printStackTrace();
            }
            /* finally {
                strDeptName = ((strDeptName.equals(null) || strDeptName.equals("")) ? "NOT-FOUND" : strDeptName);
            } */

            txtMapOutputKey.set(arrEmpAttributes[0].toString());

            txtMapOutputValue.set(arrEmpAttributes[0].toString() + "\t"
                    + arrEmpAttributes[1].toString() + "\t"
                    + arrEmpAttributes[2].toString() + "\t"
                    + arrEmpAttributes[3].toString() + "\t" + strDeptName);
        }
        strDeptName = "";
        context.write(txtMapOutputKey, txtMapOutputValue);
    }
}
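
For comparison, here is a minimal sketch of how a cached file is commonly located and read in setup() with the Hadoop 2.x API. The file name depart.txt and the tab-separated id/name layout are taken from the code above; the name-based comparison and the read-by-local-name rely on Hadoop 2.x symlinking cached files into the task's working directory, and describe the usual pattern rather than what the original code does:

    import java.io.BufferedReader;
    import java.io.FileReader;
    import java.io.IOException;
    import java.net.URI;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Mapper;

    public class CacheLookupSketchMapper extends Mapper<LongWritable, Text, Text, Text> {

        private final Map<String, String> departmentMap = new HashMap<String, String>();

        @Override
        protected void setup(Context context) throws IOException, InterruptedException {
            // URIs registered in the driver with job.addCacheFile(...)
            URI[] cacheFiles = context.getCacheFiles();
            if (cacheFiles == null) {
                return; // nothing was added to the cache
            }
            for (URI cacheFile : cacheFiles) {
                // Compare the file name extracted from the URI, not the URI object itself
                if (new Path(cacheFile.getPath()).getName().equals("depart.txt")) {
                    // Hadoop 2.x symlinks the localized file into the task's working
                    // directory under its file name, so it can usually be opened by name
                    try (BufferedReader reader = new BufferedReader(new FileReader("depart.txt"))) {
                        String line;
                        while ((line = reader.readLine()) != null) {
                            String[] fields = line.split("\t"); // expected: <deptId> TAB <deptName>
                            departmentMap.put(fields[0].trim(), fields[1].trim());
                        }
                    }
                }
            }
        }
    }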

I am using

    DepartmentMap.put("002", "hive");

because I want to check whether the mapper is working at all. The output is:

    1   1   name1   3000   001   null
    2   2   name2   5000   002   hive
    3   3   name3   6000   005   null
    4   4   name4   4000   003   null
    5   5   name5   8000   004   null

Now my question is: why am I getting null? In my driver:

Configuration conf = new Configuration();
Job job = new Job(conf);
job.setJobName("Map-side join with text lookup file in DCache");
job.addCacheFile(new URI("/home/hadoop/data/depart.txt"));

Log log = LogFactory.getLog(MapJoinDistributedCacheMapper.class);

job.setJarByClass(MapJoinDriver.class);
job.setMapperClass(MapJoinDistributedCacheMapper.class);
FileInputFormat.setInputPaths(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));

job.setNumReduceTasks(0);
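
The driver snippet above stops before the job is actually submitted (that part was presumably left out of the question). For reference, a minimal sketch of how the same driver is usually finished with the Hadoop 2.x API, reusing the class and path names from the question; Job.getInstance replaces the Job constructor, which is deprecated in 2.x:

    Configuration conf = new Configuration();
    // Job.getInstance(...) is the non-deprecated way to create a job in Hadoop 2.x
    Job job = Job.getInstance(conf, "Map-side join with text lookup file in DCache");
    job.setJarByClass(MapJoinDriver.class);
    job.setMapperClass(MapJoinDistributedCacheMapper.class);
    job.setNumReduceTasks(0); // map-only job
    job.addCacheFile(new URI("/home/hadoop/data/depart.txt"));
    FileInputFormat.setInputPaths(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    // Submit the job and wait for it to finish; without this the job never runs
    System.exit(job.waitForCompletion(true) ? 0 : 1);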

I cannot figure out why the program does not detect the file in the cache.

Please help me. Thanks.

1 answer:

Answer 0: (score: 0)

Thanks everyone. The problem was in my input file. Thanks.
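
The answer does not spell out what was wrong with the input, but the mapper splits both files on tabs, so the layouts it expects look roughly like this. The employee rows below are reconstructed from the output shown in the question; the depart.txt line is only an illustration of the two-column layout loadDepartmentsHashMap expects:

    depart.txt (cache file), one tab-separated pair per line: <deptId> TAB <deptName>, e.g.

        002    hive

    employee input, four tab-separated fields per line: <empId> TAB <name> TAB <salary> TAB <deptId>, e.g.

        1    name1    3000    001
        2    name2    5000    002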