Extending a class that extends Hadoop's Mapper

Time: 2016-08-06 19:48:44

Tags: java hadoop

Below is an example of a Map class [1] that extends Hadoop's Mapper class. [3] is Hadoop's Mapper class itself.

I want to create MyExampleMapper, which extends ExampleMapper, which in turn extends Hadoop's Mapper [2]. I am doing this because I only want to set a property once, in ExampleMapper, so that when I create MyExampleMapper or other subclasses I don't have to set the property myself; it is already handled by ExampleMapper. Is it possible to do this?

[1] ExampleMapper

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class ExampleMapper 
     extends Mapper<Object, Text, Text, IntWritable> {

   private final static IntWritable one = new IntWritable(1);
   private Text word = new Text();

   public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
     // Tokenize the input line and emit (word, 1) for each token
     StringTokenizer itr = new StringTokenizer(value.toString());
     while (itr.hasMoreTokens()) {
       word.set(itr.nextToken());
       context.write(word, one);
     }
   }
 }

[2] What I want

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;

public class MyExampleMapper 
     extends ExampleMapper<Object, Text, Text, IntWritable> {

   private final static IntWritable one = new IntWritable(1);
   private Text word = new Text();

   public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
     StringTokenizer itr = new StringTokenizer(value.toString());

     // Read the property that ExampleMapper is supposed to have set
     String result = System.getProperty("job.examplemapper");

     if (result.equals("true")) {
       while (itr.hasMoreTokens()) {
         word.set(itr.nextToken());
         context.write(word, one);
       }
     }
   }
 }


public class ExampleMapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT> 
     extends Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT> {

   // Intent: set the property once here so every subclass inherits it.
   // As written this does not compile, since a bare statement cannot sit
   // directly in a class body; the answer below moves it into a static block.
   System.setProperty("job.examplemapper", "true");
 }

[3] This is Hadoop's Mapper class

public class Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT> {
    public Mapper() {
    }

    protected void setup(Mapper.Context context) throws IOException, InterruptedException {
    }

    protected void map(KEYIN key, VALUEIN value, Mapper.Context context) throws IOException, InterruptedException {
        context.write(key, value);
    }

    protected void cleanup(Mapper.Context context) throws IOException, InterruptedException {
    }

    public void run(Mapper.Context context) throws IOException, InterruptedException {
        this.setup(context);

        try {
            while(context.nextKeyValue()) {
                this.map(context.getCurrentKey(), context.getCurrentValue(), context);
            }
        } finally {
            this.cleanup(context);
        }

    }

    public class Context extends MapContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> {
        public Context(Configuration conf, TaskAttemptID taskid, RecordReader<KEYIN, VALUEIN> reader, RecordWriter<KEYOUT, VALUEOUT> writer, OutputCommitter committer, StatusReporter reporter, InputSplit split) throws IOException, InterruptedException {
            super(conf, taskid, reader, writer, committer, reporter, split);
        }
    }
}

1 Answer:

Answer 0 (score: 2):

import org.apache.hadoop.mapreduce.Mapper;

public class ExampleMapper<T, X, Y, Z> extends Mapper<T, X, Y, Z> {
    static {
        // Runs once per JVM, when ExampleMapper (or any subclass) is first loaded
        System.setProperty("job.examplemapper", "true");
    }
}
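
This works because Java initializes a superclass before its subclass, so the static block runs once per JVM as soon as ExampleMapper or any subclass of it is first used. A minimal, self-contained sketch (the class names here are hypothetical, purely for illustration) of that initialization order:

public class StaticInitDemo {
    static class Base {
        static {
            // Runs once, when Base (or a subclass) is first initialized
            System.setProperty("job.examplemapper", "true");
        }
    }

    static class Derived extends Base {
    }

    public static void main(String[] args) {
        new Derived(); // initializing Derived initializes Base first
        System.out.println(System.getProperty("job.examplemapper")); // prints "true"
    }
}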

Then extend it in your program:

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;

public class MyExampleMapper 
     extends ExampleMapper<Object, Text, Text, IntWritable> {

   private final static IntWritable one = new IntWritable(1);
   private Text word = new Text();

   public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
     StringTokenizer itr = new StringTokenizer(value.toString());

     // The property was set by ExampleMapper's static initializer
     String result = System.getProperty("job.examplemapper");

     if (result.equals("true")) {
       while (itr.hasMoreTokens()) {
         word.set(itr.nextToken());
         context.write(word, one);
       }
     }
   }
 }
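
For completeness, a minimal driver sketch showing how such a mapper could be wired into a job. It assumes a Hadoop 2.x word-count-style setup; the class name ExampleDriver and the reducer-less configuration are illustrative assumptions, not part of the original question.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class ExampleDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "my example job");
        job.setJarByClass(ExampleDriver.class);
        // MyExampleMapper inherits the static property set by ExampleMapper
        job.setMapperClass(MyExampleMapper.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}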