I am trying to import the documents of a MongoDB collection into HDFS through a MapReduce job, using the old API. Here is the driver code:
package my.pac;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import com.mongodb.hadoop.mapred.MongoInputFormat;
import com.mongodb.hadoop.util.MongoConfigUtil;
public class ImportDriver extends Configured implements Tool {

    public static void main(String[] args) throws Exception {
        int exitCode = ToolRunner.run(new ImportDriver(), args);
        System.exit(exitCode);
    }

    @Override
    public int run(String[] args) throws Exception {
        JobConf conf = new JobConf();
        // Read from the "shows" collection of the SampleDb database.
        MongoConfigUtil.setInputURI(conf, "mongodb://127.0.0.1:27017/SampleDb.shows");
        conf.setJarByClass(ImportDriver.class);
        conf.addResource(new Path("/usr/lib/hadoop/hadoop-1.2.1/conf/core-site.xml"));
        conf.addResource(new Path("/usr/lib/hadoop/hadoop-1.2.1/conf/hdfs-site.xml"));
        // Write the job output to the HDFS path passed on the command line.
        FileOutputFormat.setOutputPath(conf, new Path(args[0]));
        conf.setInputFormat(MongoInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);
        conf.setMapperClass(ImportMapper.class);
        conf.setMapOutputKeyClass(Text.class);
        conf.setMapOutputValueClass(Text.class);
        JobClient.runJob(conf);
        return 0;
    }
}

Here is my Mapper code:
package my.pac;
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.bson.BSONObject;
import com.mongodb.hadoop.io.BSONWritable;
public class ImportMapper extends MapReduceBase implements Mapper<BSONWritable, BSONWritable, Text, Text> {

    @Override
    public void map(BSONWritable key, BSONWritable value,
            OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
        // This cast is the line that throws the ClassCastException shown below.
        String val = ((BSONObject) value).get("_id").toString();
        System.out.println(val);
        output.collect(new Text(val), new Text(val));
    }
}
I am using Hadoop 1.2.1, and I have added the mongo-hadoop connector and MongoDB Java driver jars to the classpath.
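The job is launched with a command along these lines (the jar name and the HDFS output directory are placeholders; the mongo-hadoop and MongoDB driver jars are assumed to already be on the Hadoop classpath):
hadoop jar mongo-import.jar my.pac.ImportDriver /user/hadoop/mongo_output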
When I run it, I get an error like this:
java.lang.Exception: java.lang.ClassCastException: com.mongodb.hadoop.io.BSONWritable cannot be cast to org.bson.BSONObject
at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:354)
Caused by: java.lang.ClassCastException: com.mongodb.hadoop.io.BSONWritable cannot be cast to org.bson.BSONObject
at my.pac.ImportMapper.map(ImportMapper.java:18)
at my.pac.ImportMapper.map(ImportMapper.java:1)
at org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:50)
at org.apache.hadoop.mapred.MapTask.runOldMapper(MapTask.java:430)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:366)
at org.apache.hadoop.mapred.LocalJobRunner$Job$MapTaskRunnable.run(LocalJobRunner.java:223)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
How do I fix this?
Answer 0 (score: 0)
You may have an outdated driver on your classpath that is causing a conflict in the read-preference settings.
See this link to a similar issue: https://jira.mongodb.org/browse/JAVA-849
If that does not help, https://jira.talendforge.org/browse/TBD-1002 suggests you may need to re-run MongoDB or use a separate connection.
Answer 1 (score: 0)
It turns out all the jars I was using were correct. The way I was trying to read data out of the BSONWritable was wrong: I was trying to cast a BSONWritable to a BSONObject, which is not a valid cast. Here is how I solved the problem:
String name = (String)value.getDoc().get("name");
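For reference, a minimal sketch of the corrected map() method following this fix (it drops into the ImportMapper class above and reads the _id field as the original did; any field present in the documents works the same way through getDoc()):

@Override
public void map(BSONWritable key, BSONWritable value,
        OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
    // getDoc() exposes the underlying BSONObject, so no cast of the
    // BSONWritable itself is needed.
    String id = value.getDoc().get("_id").toString();
    output.collect(new Text(id), new Text(id));
}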