Cannot access HashMap in mapper, MapReduce

Time: 2017-01-26 23:21:12

Tags: java hadoop mapreduce

I want to replace values in the input data inside my mapper using a dictionary (CSV) defined in another file, so I tried loading the CSV data into a HashMap and referring to it from the mapper.
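
For context, the kind of substitution I ultimately want looks roughly like the sketch below (illustration only, with an assumed input format like "cat went home"; the simplified mapper further down just iterates the HashMap instead of doing a real lookup):

// Rough sketch of the intended lookup-and-replace inside Mapper1 (illustration only).
// For an input line like "cat went home" it would emit "4 went home",
// using the name -> legs dictionary loaded from the CSV.
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
  String[] tokens = value.toString().split(" ", 2);
  Integer legs = Test.map.get(tokens[0]);          // dictionary lookup on the first token
  if (legs != null) {
    String rest = (tokens.length > 1) ? " " + tokens[1] : "";
    context.write(new Text(legs + rest), new Text(""));
  }
}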

The Java code and CSV below are a simplified version of my program. The code works in my local environment (Mac OS X, pseudo-distributed mode), but not on my EC2 instance (Ubuntu, pseudo-distributed mode).

More specifically, I get the following on stdout during the run:

cat:4
human:2
flamingo:1

This means the file reader successfully puts the CSV data into the HashMap.

However, the mapper maps nothing, so I get empty output in the EC2 environment, even though locally it maps 3 * (number of lines in the input file) elements and produces the following:

test,cat
test,flamingo
test,human

Does anyone have an answer or a hint?

Test.java

import java.io.IOException;
import java.util.StringTokenizer;
import java.io.FileReader;
import java.io.BufferedReader;
import java.io.DataInput; 
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.io.WritableUtils;

public class Test {

  public static HashMap<String, Integer> map  = new HashMap<String, Integer>();

  public static class Mapper1 extends Mapper<LongWritable, Text, Text, Text> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
      for(Map.Entry<String, Integer> e : map.entrySet()) {
        context.write(new Text(e.getKey()), new Text("test"));
      }
    }
  }

  public static class Reducer1 extends Reducer<Text, Text, Text, Text> {
    @Override
    protected void reduce(Text key, Iterable<Text> vals, Context context) throws IOException, InterruptedException {
      context.write(new Text("test"), key);
    }
  }

  public static class CommaTextOutputFormat extends TextOutputFormat<Text, Text> {
    @Override
    public RecordWriter<Text, Text> getRecordWriter(TaskAttemptContext job) throws IOException, InterruptedException {
      Configuration conf = job.getConfiguration();
      String extension = ".txt";
      Path file = getDefaultWorkFile(job, extension);
      FileSystem fs = file.getFileSystem(conf);
      FSDataOutputStream fileOut = fs.create(file, false);
      return new LineRecordWriter<Text, Text>(fileOut, ",");
    }
  }

  public static void get_list(String list_path){
    try {
      FileReader fr = new FileReader(list_path);
      BufferedReader br = new BufferedReader(fr);
      String line = null, name = null;
      int leg = 0;

      while ((line = br.readLine()) != null) {
        if (!line.startsWith("name") && !line.trim().isEmpty()) {
          String[] name_leg = line.split(",", 0);
          name = name_leg[0];
          leg = Integer.parseInt(name_leg[1]);
          map.put(name, leg);
        }
      }
      br.close();
    }
    catch(IOException ex) {
      System.err.println(ex.getMessage());
      ex.printStackTrace();
    }

    for(Map.Entry<String, Integer> e : map.entrySet()) {
      System.out.println(e.getKey() + ":" + e.getValue());
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    if (args.length != 3) {
      System.err.println(
        "Need 3 arguments: <input dir> <output base dir> <list path>");
      System.exit(1);
    }

    get_list(args[2]);
    Job job = Job.getInstance(conf, "test");

    job.setJarByClass(Test.class);
    job.setMapperClass(Mapper1.class);
    job.setReducerClass(Reducer1.class);
    job.setNumReduceTasks(1);
    job.setInputFormatClass(TextInputFormat.class);

    // mapper output
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Text.class);

    // reducer output
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);

    // formatter
    job.setOutputFormatClass(CommaTextOutputFormat.class);

    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));

    if(!job.waitForCompletion(true)){
      System.exit(1);
    }

    System.out.println("All Finished");
    System.exit(0);
  }
}

list.csv (args[2])

name,legs
cat,4
human,2
flamingo,1

=================================

I followed @Rahul Sharma's answer and modified my code as shown below. Now the code works in both environments.

Many thanks to @Rahul Sharma and @Serhiy for the precise answer and helpful comments.

Test.java

import java.io.IOException;
import java.util.StringTokenizer;
import java.io.FileReader;
import java.io.BufferedReader;
import java.io.DataInput; 
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.net.URI;
import java.io.InputStreamReader;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.io.WritableUtils;

public class Test {

  public static HashMap<String, Integer> map  = new HashMap<String, Integer>();

  public static class Mapper1 extends Mapper<LongWritable, Text, Text, Text> {

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
      URI[] files = context.getCacheFiles();
      Path list_path = new Path(files[0]);

      try {
        FileSystem fs = list_path.getFileSystem(context.getConfiguration());
        BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(list_path)));
        String line = null, name = null;
        int leg = 0;

        while ((line = br.readLine()) != null) {
          if (!line.startsWith("name") && !line.trim().isEmpty()) {
            String[] name_leg = line.split(",", 0);
            name = name_leg[0];
            leg = Integer.parseInt(name_leg[1]);
            map.put(name, leg);
          }
        }
        br.close();
      }
      catch(IOException ex) {
        System.err.println(ex.getMessage());
        ex.printStackTrace();
      }

      for(Map.Entry<String, Integer> e : map.entrySet()) {
        System.out.println(e.getKey() + ":" + e.getValue());
      }
    }

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
      for(Map.Entry<String, Integer> e : map.entrySet()) {
        context.write(new Text(e.getKey()), new Text("test"));
      }
    }

  }

  public static class Reducer1 extends Reducer<Text, Text, Text, Text> {
    @Override
    protected void reduce(Text key, Iterable<Text> vals, Context context) throws IOException, InterruptedException {
      context.write(new Text("test"), key);
    }
  }

  // Writer
  public static class CommaTextOutputFormat extends TextOutputFormat<Text, Text> {
    @Override
    public RecordWriter<Text, Text> getRecordWriter(TaskAttemptContext job) throws IOException, InterruptedException {
      Configuration conf = job.getConfiguration();
      String extension = ".txt";
      Path file = getDefaultWorkFile(job, extension);
      FileSystem fs = file.getFileSystem(conf);
      FSDataOutputStream fileOut = fs.create(file, false);
      return new LineRecordWriter<Text, Text>(fileOut, ",");
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    if (args.length != 3) {
      System.err.println(
        "Need 3 arguments: <input dir> <output base dir> <list path>");
      System.exit(1);
    }

    Job job = Job.getInstance(conf, "test");
    job.addCacheFile(new Path(args[2]).toUri());

    job.setJarByClass(Test.class);
    job.setMapperClass(Mapper1.class);
    job.setReducerClass(Reducer1.class);
    job.setNumReduceTasks(1);
    job.setInputFormatClass(TextInputFormat.class);

    // mapper output
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Text.class);

    // reducer output
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);

    // formatter
    job.setOutputFormatClass(CommaTextOutputFormat.class);

    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));

    if(!job.waitForCompletion(true)){
      System.exit(1);
    }

    System.out.println("All Finished");
    System.exit(0);
  }
}

1 Answer:

Answer (score: 2)

First, you need to learn a bit more about how the MapReduce framework works.

Your program behaves as expected in local mode because the mapper, reducer, and job driver all run in the same JVM. In pseudo-distributed or fully distributed mode, each component gets its own JVM, so the values that get_list puts into the static HashMap are not visible to the mapper and reducer, which run in separate JVMs.
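
As a quick check (a diagnostic sketch, not part of the fix), you can log the size of the static map when a task starts; in local mode it prints 3, in pseudo-distributed mode it prints 0, because get_list only ran in the driver JVM:

    // Diagnostic sketch: add this to Mapper1 and check the task logs.
    // It prints 3 in local mode but 0 in pseudo-distributed mode, because the
    // static map was only populated in the driver JVM.
    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
      System.err.println("entries visible in this task JVM: " + Test.map.size());
    }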

Use the distributed cache to make it work in cluster mode.

  1. In the job's main class, add the file to the distributed cache:

    JobConf job = new JobConf();
    DistributedCache.addCacheArchive(new URI(args[2]), job);
    
  2. Access the file in the mapper or reducer:

    public void setup(Context context) throws IOException, InterruptedException {
      Configuration conf = context.getConfiguration();
      FileSystem fs = FileSystem.getLocal(conf);

      Path[] dataFile = DistributedCache.getLocalCacheFiles(conf);
      BufferedReader cacheReader = new BufferedReader(new InputStreamReader(fs.open(dataFile[0])));
      // Implement the get_list method functionality here
    }
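
On Hadoop 2.x, where DistributedCache is deprecated, the same two steps use the newer Job API, which is what the revised code above does:

    // Driver side: register the CSV with the distributed cache via the Job API.
    Job job = Job.getInstance(conf, "test");
    job.addCacheFile(new Path(args[2]).toUri());

    // Task side, inside setup(): open the cached file through the FileSystem API.
    URI[] files = context.getCacheFiles();
    Path listPath = new Path(files[0]);
    FileSystem fs = listPath.getFileSystem(context.getConfiguration());
    BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(listPath)));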