Disk full when running Hadoop

Time: 2015-04-17 06:34:19

Tags: hadoop mapreduce diskspace

I ran a recursive map/reduce program. Something went wrong and it consumed almost all of the available space on my C drive. So I shut down the ResourceManager, NodeManager, NameNode and DataNode consoles. Now I have an almost full C drive and I don't know how to free the disk space and get the drive back to how it was before. What should I do now? Any help is appreciated. Here is the code:

import java.io.IOException;
import java.util.Date;
import java.util.StringTokenizer;
import java.util.Vector;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class apriori {

public static class CandidateGenMap extends Mapper<LongWritable, Text, Text, IntWritable>
{
    private Text word = new Text();
    private IntWritable count = new IntWritable(1);  // every candidate is emitted with a count of 1
    private int Support = 5;

    // Recursively enumerates every combination (candidate itemset) of the input
    // items and writes each one to the context with a count of 1.
    public void CandidatesGenRecursion(Vector<String> in, Vector<String> out,
                                        int length, int level, int start,
                                        Context context) throws IOException {


        int i,size;

        for(i=start;i<length;i++) {
            if(level==0){
                out.add(in.get(i));
            } else {

                out.add(in.get(i));

                // join the items collected so far into one space-separated candidate string
                int init = 1;
                StringBuffer current = new StringBuffer();
                for (String s : out) {
                    if (init == 1) {
                        current.append(s);
                        init = 0;
                    } else {
                        current.append(" ");
                        current.append(s);
                    }
                }


                word.set(current.toString());
                try {
                    // emit the candidate itemset with a count of 1
                    context.write(word, count);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
            if(i < length-1) {
                CandidatesGenRecursion(in, out, length,level+1,i+1, context);
            }
            size = out.size();
            if(size>0){
                out.remove(size-1);
            }
        }

    }

    @Override
    public void map(LongWritable key,Text value,Context context) throws IOException
    {
        // expected input: an identifier followed by a comma-separated item list
        String line = value.toString();
        StringTokenizer tokenizer = new StringTokenizer(line);
        String[] token = new String[2];
        int i = 0;
        while (tokenizer.hasMoreTokens() && i < token.length) {
            token[i] = tokenizer.nextToken();
            ++i;
        }

        StringTokenizer urlToken = new StringTokenizer(token[1], ",");


        // collect the items for this record
        Vector<String> lst = new Vector<String>();
        int loop = 0;
        while (urlToken.hasMoreTokens()) {
            String str = urlToken.nextToken();
            lst.add(str);
            loop++;
        }


        // recursively generate and emit every candidate combination of the items
        Vector<String> combinations = new Vector<String>();

        if (!lst.isEmpty()) {
            CandidatesGenRecursion(lst, combinations, loop, 0, 0, context);
        }

    }
}



public static class CandidateGenReduce extends Reducer<Text, IntWritable, Text, IntWritable>
{
    @Override
    public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException
    {
        // sum the 1s emitted for each candidate itemset
        int sum = 0;
        for (IntWritable value : values) {
            sum += value.get();
        }
        try {
            context.write(key, new IntWritable(sum));
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
}


public static void main(String[] args) throws Exception
{

    Date dt;
    long start,end; // Start and end time


    //Start Timer
    dt = new Date();
    start = dt.getTime();


    Configuration conf1 = new Configuration();
    System.out.println("Starting Job2");
    Job job2 = new Job(conf1, "apriori candidate gen");
    job2.setJarByClass(apriori.class);

    job2.setMapperClass(CandidateGenMap.class);
    job2.setCombinerClass(CandidateGenReduce.class);
    job2.setReducerClass(CandidateGenReduce.class);
    job2.setMapOutputKeyClass(Text.class);
    job2.setMapOutputValueClass(IntWritable.class);
    job2.setOutputKeyClass(Text.class);
    job2.setOutputValueClass(IntWritable.class);

    job2.setInputFormatClass(TextInputFormat.class);
    job2.setOutputFormatClass(TextOutputFormat.class);

    FileInputFormat.addInputPath(job2, new Path(args[0]));
    FileOutputFormat.setOutputPath(job2, new Path(args[1]));
    job2.waitForCompletion(true);
    //End Timer
    dt = new Date();
    end = dt.getTime();


}

}

3 Answers:

Answer 0 (score: 0)

Hadoop needs enough disk space for its I/O operations in every phase (map, reduce, etc.).
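Much of that space is local rather than HDFS: intermediate map output is spilled to the node's local disk, and a job like this one writes an exponential number of candidate combinations, so the local scratch directories can fill the drive. As a rough sketch, assuming the default hadoop.tmp.dir of /tmp/hadoop-<user> (on Windows this usually resolves to a folder such as C:\tmp\hadoop-<user>), you could check the scratch directories and, once all daemons are stopped, clear them:

$ du -sh /tmp/hadoop-<user>/nm-local-dir

$ rm -rf /tmp/hadoop-<user>/nm-local-dir/*

Adjust the path to whatever hadoop.tmp.dir and yarn.nodemanager.local-dirs point to in your configuration before deleting anything.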

Answer 1 (score: 0)

Check your HDFS job output path and delete its contents.

List the contents:

$ sudo -u hdfs hadoop fs -ls [YourJobOutputPath]

Disk used:

$ sudo -u hdfs hadoop fs -du -h [YourJobOutputPath]

Delete the contents (careful, it is recursive!):

$ sudo -u hdfs hadoop fs -rm -R [YourJobOutputPath]
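Note that, depending on configuration, hadoop fs -rm may only move the files into the user's .Trash directory rather than freeing the space right away. If the HDFS trash feature is enabled, either bypass it or empty the trash afterwards:

$ sudo -u hdfs hadoop fs -rm -R -skipTrash [YourJobOutputPath]

$ sudo -u hdfs hadoop fs -expunge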

Answer 2 (score: 0)

Deleting the output directories may help free the disk space taken up by the files that the MapReduce jobs created.
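In the driver posted above, the output directory is whatever was passed as the second command-line argument (args[1] handed to FileOutputFormat.setOutputPath). Assuming that path for illustration, something like:

$ hadoop fs -du -h <the path you passed as args[1]>

$ hadoop fs -rm -R <the path you passed as args[1]>

should first show and then reclaim the space taken by the job's output files.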