Selecting distinct records in Java MapReduce

Date: 2015-06-23 16:28:39

Tags: hadoop, dictionary, mapreduce

My input file contains records like this:

10001|76884|1995-06-24|1996-06-23
10001|76884|1995-06-24|1996-06-23
10001|75286|1993-06-24|1994-06-24

My goal is to remove the duplicate records, so that the output looks like:

10001|76884|1995-06-24|1996-06-23
10001|75286|1993-06-24|1994-06-24

I wrote the following code:

import java.io.IOException;
import java.util.*;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.Mapper.Context;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class charterSelDistRec {

        public static class Map extends Mapper <LongWritable, Text, Text, Text> {
            private String tableKey,tableValue;

            public void map(LongWritable key, Text value, Context context)
                    throws IOException, InterruptedException {

                    String line = value.toString();
                    String splitarray[] = line.split("\\|",2);
                    tableKey = splitarray[0].trim();
                    tableValue = splitarray[1].trim();

                    context.write(new Text(tableKey), new Text(tableValue));     
                }
        }               

        public static class Reduce extends Reducer <Text, Text, Text, Text> {                         
            public void reduce(Text key, Iterator<Text> values, Context context) 
                      throws IOException, InterruptedException {
                    String ColumnDelim="";
                    String tableOutValue=ColumnDelim+values;
                    context.write(new Text(key), new Text(tableOutValue));

                }
        }

        public static void main(String[] args) throws Exception {
                Configuration conf = new Configuration();
                Job job = new Job(conf,"charterSelDistRec");
                job.getConfiguration().set("mapreduce.job.queuename", "root.Dev");
                job.getConfiguration().set("mapreduce.output.textoutputformat.separator","|");
                job.setJobName("work_charter_stb.ext_chtr_vod_fyi_mapped");
                job.setOutputKeyClass(Text.class);
                job.setOutputValueClass(Text.class);

                job.setMapperClass(Map.class);

                job.setReducerClass(Reduce.class);

                job.setInputFormatClass(TextInputFormat.class);
                job.setOutputFormatClass(TextOutputFormat.class);


                FileInputFormat.addInputPath(job, new Path(args[0]));
                FileOutputFormat.setOutputPath(job, new Path(args[1]));
                job.setJarByClass(charterSelDistRec.class); 
                job.waitForCompletion(true);
          }
      }

But the output file still contains duplicates. Please let me know where I am going wrong.

3 Answers:

Answer 0 (score: 3)

It need not be that complicated. All you have to do is:

1. In the mapper, emit each entire line as the key, with any value.
2. In the reducer, emit just the key and ignore the values.

Sharing the code:

    Here is the input:

    10001|76884|1995-06-24|1996-06-23
    10001|76884|1995-06-24|1996-06-23
    10001|75286|1993-06-24|1994-06-24
    

    Here is the code:

    import java.io.IOException;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.Mapper;
    import org.apache.hadoop.mapreduce.Reducer;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

    public class StackRemoveDup {
    
        // Emit the whole input line as the key; NullWritable stands in for the value.
        public static class MyMapper extends Mapper<LongWritable, Text, Text, NullWritable> {
    
            @Override
            public void map(LongWritable ignore, Text value, Context context)
                throws java.io.IOException, InterruptedException {
                context.write(value,NullWritable.get());
            }  
        }
    
        // Duplicate lines arrive grouped under one key, so writing the key once dedupes.
        public static class MyReducer extends Reducer<Text, NullWritable, Text, NullWritable> {
    
          @Override
          public void reduce(Text key, Iterable<NullWritable> values, Context context)
              throws IOException, InterruptedException {
            context.write(key, NullWritable.get());
          }
        }       
    
      public static void main(String[] args) 
                      throws IOException, ClassNotFoundException, InterruptedException {
    
        Job job = new Job();
        job.setJarByClass(StackRemoveDup.class);
        job.setJobName("StackRemoveDup");
    
        job.setMapperClass(MyMapper.class);
        job.setReducerClass(MyReducer.class);
    
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
    
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);
    
        job.waitForCompletion(true);
      }
    }
    

    And here is the output:

    10001|75286|1993-06-24|1994-06-24
    10001|76884|1995-06-24|1996-06-23
    
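One optional refinement: because MyReducer simply re-emits its key, and its input and output types both match the map output types, it can also serve as a combiner, dropping duplicate lines map-side before the shuffle. This would be a one-line addition to the main method above:

    // Optional: deduplicate map output locally before it is shuffled to reducers.
    job.setCombinerClass(MyReducer.class);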

Answer 1 (score: 0)

Your first line has two records and the second line has one record. Once a line is read in the map, you split on |, but as far as I can see your records (entities) are separated by spaces. Just verify what the actual data really looks like. The conventional format is one record (entity) per line; MapReduce then groups the unique keys after the map phase. Once your input is in that format, all you get in the reducer is the unique keys.

If your input really is different (as above, with two records on the same line), you need to consider a different input format, or handle the splitting logic yourself; a minimal sketch of the latter follows. Understanding how MapReduce works and the input formats it expects will help you. Happy learning!
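Here is a minimal sketch of that "handle the logic yourself" option, assuming records sharing one physical line are separated by whitespace (the MultiRecordMapper name and the whitespace delimiter are illustrative assumptions, not taken from the answer):

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class MultiRecordMapper extends Mapper<LongWritable, Text, Text, NullWritable> {

    @Override
    public void map(LongWritable offset, Text line, Context context)
            throws IOException, InterruptedException {
        // If several pipe-delimited records share one physical line,
        // split them on whitespace and emit each one as its own key.
        for (String record : line.toString().trim().split("\\s+")) {
            if (!record.isEmpty()) {
                context.write(new Text(record), NullWritable.get());
            }
        }
    }
}

Paired with the identity-style reducer from the first answer, this still yields exactly one copy of each record.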

Answer 2 (score: 0)

Try this. The idea is to emit only the first value from the Iterable, since they are all identical and you want to remove the duplicates.

import java.io.IOException;
import java.util.*;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.Mapper.Context;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class charterSelDistRec {

    public  static class MyMapper extends Mapper<LongWritable, Text, Text, Text> {

        @Override
        public void map(LongWritable ignore, Text value, Context context)
            throws IOException, InterruptedException {
            context.write(value, value);
        }  
    }

    public static class MyReducer extends Reducer<Text, Text, Text, NullWritable> {    
      @Override
      public void reduce(Text key, Iterable<Text> values, Context context)
          throws IOException, InterruptedException {
          // All grouped values are identical, so write the first one and stop.
          for (Text value : values) {
              context.write(value, NullWritable.get());
              break;
          }
      }
    }       

  /* This is your main. Changed the output value class, declared the map
     output value class, and wired in MyMapper/MyReducer. */
  public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      Job job = new Job(conf,"charterSelDistRec");
      job.getConfiguration().set("mapreduce.job.queuename", "root.Dev");
      job.getConfiguration().set("mapreduce.output.textoutputformat.separator","|");
      job.setJobName("work_charter_stb.ext_chtr_vod_fyi_mapped");
      job.setOutputKeyClass(Text.class);
      job.setOutputValueClass(NullWritable.class);
      // The map output value type (Text) differs from the job's final output
      // value type (NullWritable), so it must be declared explicitly.
      job.setMapOutputValueClass(Text.class);

      job.setMapperClass(MyMapper.class);

      job.setReducerClass(MyReducer.class);

      job.setInputFormatClass(TextInputFormat.class);
      job.setOutputFormatClass(TextOutputFormat.class);


      FileInputFormat.addInputPath(job, new Path(args[0]));
      FileOutputFormat.setOutputPath(job, new Path(args[1]));
      job.setJarByClass(charterSelDistRec.class); 
      job.waitForCompletion(true);
   }
}
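
It may also help to see why the original code kept the duplicates: its reduce method was declared with Iterator<Text> instead of Iterable<Text>, so it never overrode Reducer.reduce, and Hadoop ran the default identity implementation, which writes every record straight through. The version above uses the correct signature and marks it with @Override, so a signature mismatch would fail at compile time.

Assuming the compiled class is packaged into a jar (the jar name and HDFS paths below are placeholders), the job can then be run in the usual way:

hadoop jar charterSelDistRec.jar charterSelDistRec /user/you/input /user/you/output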