读取hadoop mapreduce中的参数

时间:2014-09-24 05:39:16

标签: hadoop mapreduce

我是hadoop mapreduce的新手。我正在尝试在MapReduce中实现搜索功能,我的输入文件是这样的:

key1 value1,value3
key2 value2,value6

我想找到用户将作为命令行参数传递的键的值列表。为此我的主(驱动程序)类就像这样

public static void main(String[] args) {
    JobClient client = new JobClient();
    JobConf conf = new JobConf(NameSearchJava.class);

    // The key name used here MUST match the one read back in the mapper's
    // configure() via job.get(...). Hard-coded to "Joy" for now; later this
    // should come from the command line (e.g. args[2]).
    conf.set("searchKey", "Joy");
    conf.setJobName("Search");

    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(Text.class);

    // args[0] = input dir, args[1] = output dir.
    FileInputFormat.setInputPaths(conf, new Path(args[0]));
    FileOutputFormat.setOutputPath(conf, new Path(args[1]));

    conf.setMapperClass(SearchMapper.class);
    conf.setReducerClass(SearchReducer.class);
    client.setConf(conf);

    try {
      JobClient.runJob(conf);
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
}

and my configure function is:

      String item ;
      public void configure(JobConf job) {
          {
           item = job.get("test");
          System.out.println(item);
          System.err.println("search" + item);
          }

我应该在Mapper还是Reducer中编写configure函数?如何在reducer中使用这个item参数进行比较?这是在hadoop中获取参数的正确方法吗?

2 个答案:

答案 0 :(得分:2)

加入Hadooper的答案。

这是完整的代码。

您可以参考Hadooper的答案进行解释。

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/**
 * @author Unmesha sreeveni
 * @Date 23 sep 2014
 */
/**
 * Counts occurrences of a search word passed as the third command-line
 * argument. The word is shipped to the mappers through the job
 * {@link Configuration} under the key {@code "word"}.
 *
 * Usage: StringSearchDriver &lt;input dir&gt; &lt;output dir&gt; &lt;search word&gt;
 */
public class StringSearchDriver extends Configured implements Tool {

    /** Emits (token, 1) for every input token equal to the configured search word. */
    public static class Map extends
    Mapper<LongWritable, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        @Override
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // The search word was placed into the Configuration by run().
            Configuration conf = context.getConfiguration();
            String searchString = conf.get("word");
            StringTokenizer tokenizer = new StringTokenizer(value.toString());
            while (tokenizer.hasMoreTokens()) {
                String token = tokenizer.nextToken();
                if (token.equals(searchString)) {
                    word.set(token);
                    context.write(word, one);
                }
            }
        }
    }

    /** Sums the per-mapper counts for each matching word. */
    public static class Reduce extends
    Reducer<Text, IntWritable, Text, IntWritable> {

        @Override
        public void reduce(Text key, Iterable<IntWritable> values,
                Context context) throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            context.write(key, new IntWritable(sum));
        }
    }

    public static void main(String[] args) throws Exception {
        int res = ToolRunner.run(new Configuration(), new StringSearchDriver(), args);
        System.exit(res);
    }

    @Override
    public int run(String[] args) throws Exception {
        if (args.length != 3) {
            System.err.printf("Usage: StringSearchDriver <input dir> <output dir> <search word>%n");
            // Return nonzero per the Tool contract instead of System.exit(-1),
            // which would bypass ToolRunner's error handling.
            return -1;
        }

        String source = args[0];
        String dest = args[1];
        String searchWord = args[2];

        // Use the Configuration ToolRunner populated so generic options
        // (-D, -files, ...) passed on the command line are honored.
        Configuration conf = getConf();
        conf.set("word", searchWord);

        // Job.getInstance replaces the deprecated Job(Configuration, String) ctor.
        Job job = Job.getInstance(conf, "Search String");
        job.setJarByClass(StringSearchDriver.class);

        Path in = new Path(source);
        Path out = new Path(dest);

        // Remove any stale output directory so the job can be re-run.
        FileSystem fs = FileSystem.get(conf);
        if (fs.exists(out)) {
            fs.delete(out, true);
        }

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        FileInputFormat.addInputPath(job, in);
        FileOutputFormat.setOutputPath(job, out);

        boolean success = job.waitForCompletion(true);
        return success ? 0 : 1;
    }
}

答案 1 :(得分:1)

读取Driver类中的命令行参数,如下所示 -

conf.set("searchKey", args[2]);

其中args[2]是作为第三个参数传递的搜索键。

configure方法应该在Mapper中编码,如下所示 -

// Holds the search key read from the job configuration; set once per task.
String searchWord;

    // configure() runs once before any map() call, so the key set by the
    // driver via conf.set("searchKey", ...) is available to every map().
    public void configure(JobConf jc)
    {
        searchWord = jc.get("searchKey");
    }

这将使您的密钥在Mapper函数中被搜索。

您可以使用以下逻辑在Mapper中执行比较 -

public void map(LongWritable key, Text value,
            OutputCollector<Text, IntWritable> out, Reporter reporter)
            throws IOException {
        // Split the line on single spaces and emit (token, 1) for every
        // token that matches the configured search word (case-insensitive).
        final String[] tokens = value.toString().split(" ");
        for (int i = 0; i < tokens.length; i++) {
            String token = tokens[i];
            if (token.equalsIgnoreCase(searchWord)) {
                out.collect(new Text(token), new IntWritable(1));
            }
        }
    }

请告诉我这是否有帮助!