Hadoop error: ClassCastException: org.apache.hadoop.io.LongWritable cannot be cast to org.apache.hadoop.io.Text

Date: 2014-04-04 04:27:07

Tags: java hadoop mapreduce

My program is as follows:

public class TopOS extends Configured implements Tool {

    public static class MapClass extends Mapper<Text, Text, Text, LongWritable> {

        public void map(Text key, Text value, Context context) throws IOException, InterruptedException {
            // your map code goes here
            String[] fields = value.toString().split(",");

            for(String str : fields) {
                context.write(new Text(str), new LongWritable(1L));
            }
        }
    }
    public int run(String args[]) throws Exception {
        Job job = new Job();
        job.setJarByClass(TopOS.class);

        job.setMapperClass(MapClass.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        job.setJobName("TopOS");
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);
        job.setNumReduceTasks(0);
        boolean success = job.waitForCompletion(true);
        return success ? 0 : 1;
    }

    public static void main(String args[]) throws Exception {
        int ret = ToolRunner.run(new TopOS(), args);
        System.exit(ret);
    }
}

My data looks like this:

123456,Windows,6.1,6394829384232,343534353,23432,23434343,12322
123456,OSX,10,6394829384232,23354353,23432,23434343,63635
123456,Windows,6.0,5396459384232,343534353,23432,23434343,23635
123456,Windows,6.0,6393459384232,343534353,23432,23434343,33635

Why am I getting the following error, and how can I get around it?

Hadoop : java.lang.ClassCastException: org.apache.hadoop.io.LongWritable cannot be cast to org.apache.hadoop.io.Text

2 Answers:

Answer 0 (score: 3)

From my point of view, there is just one small mistake in your code.

Since you are using a plain text file as input, the fixed key class is LongWritable (which you don't need/use) and the value class is Text.

Setting the key class in the Mapper to Object, to emphasize that you don't use it, removes the error.

Here is your slightly modified code:

package org.woopi.stackoverflow.q22853574;

import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.lib.input.*;
import org.apache.hadoop.mapreduce.lib.output.*;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.fs.Path;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;

public class MapReduceJob {

  public static class MapClass extends Mapper<Object, Text, Text, LongWritable> {

    public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
        // your map code goes here
        String[] fields = value.toString().split(",");

        for(String str : fields) {
            context.write(new Text(str), new LongWritable(1L));
        }
    }
  }

  public int run(String args[]) throws Exception {
    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf);
    job.setJarByClass(MapReduceJob.class);

    job.setMapperClass(MapClass.class);

    FileInputFormat.setInputPaths(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));

    job.setJobName("MapReduceJob");
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(LongWritable.class);
    job.setNumReduceTasks(0);
    job.setInputFormatClass(TextInputFormat.class);
    boolean success = job.waitForCompletion(true);
    return success ? 0 : 1;
  }

  public static void main(String args[]) throws Exception {
    MapReduceJob j = new MapReduceJob();
    int ret = j.run(args);
    System.exit(ret);
  }
}
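
An alternative to the Object key, as a minimal sketch: the default TextInputFormat hands the mapper each line's byte offset as a LongWritable, so the Mapper can also be declared with that exact key type:

// Sketch: TextInputFormat supplies each line's byte offset as a LongWritable key.
public static class MapClass extends Mapper<LongWritable, Text, Text, LongWritable> {

  @Override
  public void map(LongWritable key, Text value, Context context)
      throws IOException, InterruptedException {
    // Split the comma-separated line and emit each field with a count of 1.
    for (String field : value.toString().split(",")) {
      context.write(new Text(field), new LongWritable(1L));
    }
  }
}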

I hope this helps.

Martin

Answer 1 (score: 1)

Can you use

//Set the key class for the job output data.
job.setOutputKeyClass(Class<?> theClass)
//Set the value class for job outputs
job.setOutputValueClass(Class<?> theClass) 

instead of the setMapOutputKeyClass and setMapOutputValueClass methods?
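
For background on the difference: the map-specific setters only matter when the map output types differ from the final job output types, which typically happens when a reducer changes the value type. In a map-only job like this one, the job-level setters are sufficient, because the map output classes default to the job output classes when not set explicitly. A short sketch of the contrasting case, assuming a hypothetical reducer that emits Text values:

// Hypothetical job with a reducer whose output value type differs from the
// mapper's; here the map-specific setters are actually required.
job.setMapOutputKeyClass(Text.class);           // mapper emits <Text, LongWritable>
job.setMapOutputValueClass(LongWritable.class);
job.setOutputKeyClass(Text.class);              // reducer emits <Text, Text>
job.setOutputValueClass(Text.class);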