Getting key/value pairs from a sequence file through map and reduce tasks in Hadoop

Date: 2012-05-29 11:16:46

Tags: hadoop mapreduce

I want to get all the key/value pairs out of a sequence file with a Hadoop MapReduce application. I followed the post http://lintool.github.com/Cloud9/docs/content/staging-records.html to read a sequence file from the main class, and that works fine. Now I want to print all the key/value pairs to a plain text file in HDFS. How can I achieve that? My code is below.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileAsBinaryInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class WordCount
{
    // SequenceFileAsBinaryInputFormat hands both keys and values to the mapper
    // as BytesWritable, so the Mapper's type parameters must say so.
    public static class Map extends Mapper<BytesWritable, BytesWritable, BytesWritable, BytesWritable>
    {
        @Override
        public void map(BytesWritable key, BytesWritable value, Context context)
                throws IOException, InterruptedException
        {
            System.out.println(key.toString());
            System.out.println(value.toString());
            context.write(key, value);
        }
    }

    // Pass-through reducer; its input types must match the mapper's output types.
    public static class Reduce extends Reducer<BytesWritable, BytesWritable, BytesWritable, BytesWritable>
    {
        @Override
        public void reduce(BytesWritable key, Iterable<BytesWritable> values, Context context)
                throws IOException, InterruptedException
        {
            for (BytesWritable val : values)
            {
                context.write(key, val);
            }
        }
    }

    public static void main(String[] args) throws Exception
    {
        // Note: FileUtil.fullyDelete on a java.io.File only clears a *local*
        // directory, not a path in HDFS.
        FileUtil.fullyDelete(new File(args[1]));

        Configuration conf = new Configuration();

        Job job = new Job(conf, "wordcount");

        job.setOutputKeyClass(BytesWritable.class);
        job.setOutputValueClass(BytesWritable.class);

        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);

        job.setInputFormatClass(SequenceFileAsBinaryInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        job.setJarByClass(WordCount.class);

        job.waitForCompletion(true);
    }
}

2 answers:

Answer 0 (score: 1):

Use the following code to read all the key/value pairs. Change it as needed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.ReflectionUtils;

public class SequenceFileReader {
    public static void main(String args[]) throws Exception {
        System.out.println("Reading Sequence File");
        Configuration conf = new Configuration();
        conf.addResource(new Path("/home/mohammad/hadoop-0.20.203.0/conf/core-site.xml"));
        conf.addResource(new Path("/home/mohammad/hadoop-0.20.203.0/conf/hdfs-site.xml"));
        FileSystem fs = FileSystem.get(conf);
        Path path = new Path("/seq/file");
        SequenceFile.Reader reader = null;
        try {
            reader = new SequenceFile.Reader(fs, path, conf);
            // Instantiate key/value holders of whatever Writable types the file declares.
            Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
            Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
            while (reader.next(key, value)) {
                System.out.println(key + "  <===>  " + value.toString());
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            IOUtils.closeStream(reader);
        }
    }
}
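
Since the question asks for the pairs to land in a plain text file in HDFS rather than on stdout, here is a minimal sketch (not part of the original answer) that reuses the same reader loop but writes each pair through FileSystem.create. The paths /seq/file and /out/pairs.txt are placeholders; substitute your own.

import java.io.BufferedWriter;
import java.io.OutputStreamWriter;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.ReflectionUtils;

public class SequenceFileToText {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path in = new Path("/seq/file");        // placeholder input sequence file
        Path out = new Path("/out/pairs.txt");  // placeholder output text file

        SequenceFile.Reader reader = null;
        BufferedWriter writer = null;
        try {
            reader = new SequenceFile.Reader(fs, in, conf);
            writer = new BufferedWriter(new OutputStreamWriter(fs.create(out, true)));
            Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
            Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
            // Write one "key<TAB>value" line per record, mirroring TextOutputFormat.
            while (reader.next(key, value)) {
                writer.write(key + "\t" + value);
                writer.newLine();
            }
        } finally {
            IOUtils.closeStream(reader);
            IOUtils.closeStream(writer);
        }
    }
}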

Answer 1 (score: 1):

Please find the program below. It may help you get an idea of how to convert a BytesWritable to Text.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.Reader;
import org.apache.hadoop.io.Text;

public class SequenceFileRead {
    public static void main(String args[]) throws IOException {
        Configuration conf = new Configuration();
        Path path = new Path(args[0]);
        SequenceFile.Reader reader = null;
        try {
            reader = new SequenceFile.Reader(conf, Reader.file(path));
            Text key = new Text();
            BytesWritable value = new BytesWritable();
            while (reader.next(key, value)) {
                System.out.println(key);
                // getBytes() returns the backing buffer, which may be longer than
                // the record itself; getLength() gives the number of valid bytes.
                System.out.println(new String(value.getBytes(), 0, value.getLength()));
            }
        } finally {
            IOUtils.closeStream(reader);
        }
    }
}
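
Another option, closer to what the question's MapReduce attempt was aiming for, is a map-only job: with zero reducers, each mapper's output goes straight through TextOutputFormat, which writes one "key<TAB>value" line per record into the output directory on HDFS. The sketch below is my own assumption, not part of either answer, and it presumes the sequence file stores Text keys and values; substitute the actual Writable types of your file.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class SequenceFileDump {
    // Identity mapper: assumes Text keys and values in the sequence file.
    public static class DumpMapper extends Mapper<Text, Text, Text, Text> {
        @Override
        protected void map(Text key, Text value, Context context)
                throws IOException, InterruptedException {
            context.write(key, value);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = new Job(conf, "sequence-file-dump");
        job.setJarByClass(SequenceFileDump.class);

        job.setMapperClass(DumpMapper.class);
        // Map-only: with no reduce phase, mapper output goes directly
        // to the output format as text.
        job.setNumReduceTasks(0);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        job.setInputFormatClass(SequenceFileInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}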