Error when using a custom FileInputFormat

Date: 2013-12-03 00:25:57

Tags: hadoop

Hello, I'm new to MapReduce programming. I'm trying to read from a PDF file so that I can later extend the program to do a word count. Here is my program:

package com.pdfreader;

import java.io.IOException;
import java.util.HashSet;
import java.util.StringTokenizer;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextOutputFormat;

import com.itextpdf.text.pdf.PdfReader;
import com.itextpdf.text.pdf.parser.PdfTextExtractor;
import com.pdfreader.newPDFReader.PDFInputFormat;

public class PDFReader1 {

    static class PDFInputFormat extends FileInputFormat<Text, Text>
    {


            // TODO Auto-generated method stub

            @Override
            public RecordReader<Text, Text> getRecordReader(InputSplit arg0,
                    JobConf arg1, Reporter arg2) throws IOException {
                // TODO Auto-generated method stub
                HashSet<String> hset=new HashSet<String>();

                PdfReader reader=new PdfReader("/home/a/Desktop/a.pdf");
                Integer pagecount=reader.getNumberOfPages();

                for(int i=1;i<=pagecount;i++)
                {
                    String page=PdfTextExtractor.getTextFromPage(reader, i);
                    StringTokenizer tokenizer=new StringTokenizer(page);

                    while(tokenizer.hasMoreTokens())
                    {
                        String word=tokenizer.nextToken();
                        hset.add(word);
                    }
                }
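                // note: returning null here leaves Hadoop with no RecordReader
                // for the split, which causes a NullPointerException at runtime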
                return null;
            }
    }

    class WordCountMapper extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable>{


        @Override
        public  void map(LongWritable key, Text value,
                OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException {
            // TODO Auto-generated method stub
            String line=value.toString();
            StringTokenizer tokenizer=new StringTokenizer(line);

        }

    }

    public static void main(String[] args) throws IOException {
        // TODO Auto-generated method stub
        JobConf conf=new JobConf(PDFReader1.class);
        conf.setJobName("PDFInputFormat");

        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(IntWritable.class);

        conf.setMapperClass(WordCountMapper.class);


        conf.setInputFormat(PDFInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);

        FileInputFormat.setInputPaths(conf,new Path ("/home/a/Desktop/a.pdf"));
        FileOutputFormat.setOutputPath(conf, new Path("/home/a/Desktop/Hadoop"));

        JobClient.runJob(conf);

    }

}

However, this doesn't seem to work:

13/12/01 09:46:39 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
13/12/01 09:46:39 WARN mapred.JobClient: Use GenericOptionsParser for parsing the arguments. Applications should implement Tool for the same.
13/12/01 09:46:39 WARN mapred.JobClient: No job jar file set.  User classes may not be found. See JobConf(Class) or JobConf#setJar(String).
13/12/01 09:46:39 INFO mapred.FileInputFormat: Total input paths to process : 1
13/12/01 09:46:40 INFO mapred.JobClient: Running job: job_local1646351819_0001
13/12/01 09:46:40 INFO mapred.LocalJobRunner: Waiting for map tasks
13/12/01 09:46:40 INFO mapred.LocalJobRunner: Starting task: attempt_local1646351819_0001_m_000000_0
13/12/01 09:46:40 INFO util.ProcessTree: setsid exited with exit code 0
13/12/01 09:46:40 INFO mapred.Task:  Using ResourceCalculatorPlugin : org.apache.hadoop.util.LinuxResourceCalculatorPlugin@12b7eea
13/12/01 09:46:41 INFO mapred.MapTask: Processing split: file:/home/vaibhavsrivastava/Desktop/a.pdf:0+382078
13/12/01 09:46:41 INFO mapred.JobClient:  map 0% reduce 0%
13/12/01 09:46:43 INFO mapred.MapTask: numReduceTasks: 1
13/12/01 09:46:43 INFO mapred.MapTask: io.sort.mb = 100
13/12/01 09:47:27 INFO mapred.MapTask: data buffer = 79691776/99614720
13/12/01 09:47:27 INFO mapred.MapTask: record buffer = 262144/327680
13/12/01 09:47:31 INFO mapred.LocalJobRunner: Map task executor complete.
13/12/01 09:47:31 WARN mapred.LocalJobRunner: job_local1646351819_0001
java.lang.Exception: java.lang.RuntimeException: Error in configuring object
    at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:354)
Caused by: java.lang.RuntimeException: Error in configuring object
    at org.apache.hadoop.util.ReflectionUtils.setJobConf(ReflectionUtils.java:93)
    at org.apache.hadoop.util.ReflectionUtils.setConf(ReflectionUtils.java:64)
    at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:117)
    at org.apache.hadoop.mapred.MapTask.runOldMapper(MapTask.java:426)
    at org.apache.hadoop.mapred.MapTask.run(MapTask.java:366)
    at org.apache.hadoop.mapred.LocalJobRunner$Job$MapTaskRunnable.run(LocalJobRunner.java:223)
    at java.util.concurrent.Executors$RunnableAdapter.call(Unknown Source)
    at java.util.concurrent.FutureTask$Sync.innerRun(Unknown Source)
    at java.util.concurrent.FutureTask.run(Unknown Source)
    at java.util.concurrent.ThreadPoolExecutor$Worker.runTask(Unknown Source)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(Unknown Source)
    at java.lang.Thread.run(Unknown Source)
Caused by: java.lang.reflect.InvocationTargetException
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(Unknown Source)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(Unknown Source)
    at java.lang.reflect.Method.invoke(Unknown Source)
    at org.apache.hadoop.util.ReflectionUtils.setJobConf(ReflectionUtils.java:88)
    ... 11 more
Caused by: java.lang.RuntimeException: java.lang.NoSuchMethodException: org.apache.hadoop.mapred.Mapper.<init>()
    at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:115)
    at org.apache.hadoop.mapred.MapRunner.configure(MapRunner.java:34)
    ... 16 more
Caused by: java.lang.NoSuchMethodException: org.apache.hadoop.mapred.Mapper.<init>()
    at java.lang.Class.getConstructor0(Unknown Source)
    at java.lang.Class.getDeclaredConstructor(Unknown Source)
    at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:109)
    ... 17 more
13/12/01 09:47:32 INFO mapred.JobClient: Job complete: job_local1646351819_0001
13/12/01 09:47:32 INFO mapred.JobClient: Counters: 0
13/12/01 09:47:32 INFO mapred.JobClient: Job Failed: NA
Exception in thread "main" java.io.IOException: Job failed!
    at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:1357)
    at com.pdfreader.PDFReader1.main(PDFReader1.java:91)

Can someone help with this issue so that I can understand how to do this?

2 answers:

Answer 0 (score: 0):

Your mapper class is currently an inner class of the PDFReader1 class, so its default constructor depends on an instance of that enclosing object. You can't see this in your source, but if you run javap on the compiled class you'll see that a constructor taking a single PDFReader1 argument has been generated.

The stack trace you're seeing stems from this: Hadoop uses reflection to instantiate your mapper class, but requires the mapper to have a no-arg default constructor.
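
As an illustration (a minimal sketch with hypothetical names, not from the post above): a non-static inner class receives a hidden reference to its enclosing instance, which the compiler adds as an extra constructor parameter:

public class Outer {
    class Inner {}          // javap shows a constructor Outer$Inner(Outer)
    static class Nested {}  // javap shows a no-arg constructor Outer$Nested()
}

Calling Inner.class.getDeclaredConstructor() with no arguments throws the same kind of NoSuchMethodException that ReflectionUtils reports in the trace above, while the call succeeds for Nested.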

This is easy to fix: just add the static keyword before the mapper class name:

public static class WordCountMapper extends MapReduceBase

Answer 1 (score: 0):

package com.pdfreader;

import java.io.IOException;
import java.util.HashSet;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TaskAttemptContext;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.record.Buffer;

import com.itextpdf.text.pdf.PdfReader;
import com.itextpdf.text.pdf.parser.PdfTextExtractor;


public class PDFReader1 {

    public static class PDFInputFormat extends FileInputFormat<Text, Text>
    {


            // TODO Auto-generated method stub

            @Override
            public RecordReader<Text, Text> getRecordReader(InputSplit arg0,
                    JobConf arg1, Reporter arg2) throws IOException {
                // TODO Auto-generated method stub
                return new PDFRecordReader();
            }

            public static class PDFRecordReader implements RecordReader<Text, Text>
            {
                /*HashSet<String> hset=new HashSet<String>();

                PdfReader reader=new PdfReader("/home/vaibhavsrivastava/Desktop/a.pdf");
                Integer pagecount=reader.getNumberOfPages();

                for(int i=1;i<=pagecount;i++)
                {
                    String page=PdfTextExtractor.getTextFromPage(reader, i);
                    StringTokenizer tokenizer=new StringTokenizer(page);

                    while(tokenizer.hasMoreTokens())
                    {
                        String word=tokenizer.nextToken();
                        hset.add("/home/vaibhavsrivastava/Desktop/a.pdf"+" "+word);
                    }
                }*/

                private FSDataInputStream fileIn;
                public String fileName=null;
                HashSet<String> hset=new HashSet<String>();

                private Text key=null;
                private Text value=null;



                @Override
                public void close() throws IOException {
                    // TODO Auto-generated method stub

                }
                @Override
                public Text createKey() {
                    // TODO Auto-generated method stub
                    return null;
                }
                @Override
                public Text createValue() {
                    // TODO Auto-generated method stub
                    return value;
                }
                @Override
                public long getPos() throws IOException {
                    // TODO Auto-generated method stub
                    return 0;
                }
                @Override
                public float getProgress() throws IOException {
                    // TODO Auto-generated method stub
                    return 0;
                }
                @Override
                public boolean next(Text arg0, Text arg1) throws IOException {
                    // TODO Auto-generated method stub
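                    // always returning false signals "no records", so the
                    // framework never calls map()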
                    return false;
                }


                public void initialize(InputSplit genericSplit, TaskAttemptContext job) throws
                IOException
                {
                    FileSplit split=(FileSplit) genericSplit;
                    Configuration conf=job.getConfiguration();

                    Path file=split.getPath();
                    FileSystem fs=file.getFileSystem(conf);
                    fileIn= fs.open(split.getPath());

                    fileName=split.getPath().getName().toString();
                    PdfReader reader=new PdfReader(fileName);
                    Integer pagecount=reader.getNumberOfPages();

                    for(int i=1;i<=pagecount;i++)
                    {
                        String page=PdfTextExtractor.getTextFromPage(reader, i);
                        StringTokenizer tokenizer=new StringTokenizer(page);

                        while(tokenizer.hasMoreTokens())
                        {
                            String word=tokenizer.nextToken();
                            value=new Text("a");
                        }
                    }

                }
            }


    }

    public static class WordCountMapper extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable>{


        @Override
        public  void map(LongWritable key, Text value,
                OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException {
            // TODO Auto-generated method stub
            String line=value.toString();
            System.out.println(line);
            StringTokenizer tokenizer=new StringTokenizer(line);
            while(tokenizer.hasMoreTokens())
            {
            output.collect(new Text(tokenizer.nextToken()),new IntWritable(1));
            }

        }

    }

    public static void main(String[] args) throws IOException {
        // TODO Auto-generated method stub
        JobConf conf=new JobConf(PDFReader1.class);
        conf.setJobName("PDFReader1");

        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(IntWritable.class);

        conf.setMapperClass(WordCountMapper.class);


        conf.setInputFormat(PDFInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);

        FileInputFormat.setInputPaths(conf,new Path ("/home/a/Desktop/test"));
        FileOutputFormat.setOutputPath(conf, new Path("/home/a/Desktop/Hadoop"));

        JobClient.runJob(conf);

    }

}

I finally managed to get rid of the null pointer, and the program now runs without errors, but it doesn't pick anything up; the mapper doesn't seem to do anything.

Am I missing something that needs to be added?
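
For reference, the reader in the answer above never hands any records to the framework: next() always returns false, createKey() returns null, and initialize(InputSplit, TaskAttemptContext) is never invoked, because the old org.apache.hadoop.mapred.RecordReader interface has no such method (it belongs to the newer mapreduce API). A minimal sketch of a record reader that does its extraction in the constructor and emits one record per PDF page could look like the following; the class layout and names are hypothetical, and it assumes the same iText dependency:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;

import com.itextpdf.text.pdf.PdfReader;
import com.itextpdf.text.pdf.parser.PdfTextExtractor;

public class PDFRecordReader implements RecordReader<Text, Text> {

    private final List<String> pages = new ArrayList<String>();
    private final String fileName;
    private int current = 0;

    // Do the PDF extraction up front: the old mapred API constructs the
    // reader via InputFormat.getRecordReader() and never calls initialize().
    public PDFRecordReader(InputSplit split, JobConf conf) throws IOException {
        FileSplit fileSplit = (FileSplit) split;
        fileName = fileSplit.getPath().getName();
        // Open through the Hadoop FileSystem so non-local paths work too
        PdfReader reader = new PdfReader(
                fileSplit.getPath().getFileSystem(conf).open(fileSplit.getPath()));
        for (int i = 1; i <= reader.getNumberOfPages(); i++) {
            pages.add(PdfTextExtractor.getTextFromPage(reader, i));
        }
        reader.close();
    }

    @Override
    public boolean next(Text key, Text value) throws IOException {
        if (current >= pages.size()) {
            return false;                          // no more records
        }
        key.set(fileName + "-" + (current + 1));   // e.g. "a.pdf-3"
        value.set(pages.get(current));             // one page of text per record
        current++;
        return true;
    }

    @Override
    public Text createKey() { return new Text(); }    // must not return null

    @Override
    public Text createValue() { return new Text(); }  // must not return null

    @Override
    public long getPos() throws IOException { return current; }

    @Override
    public float getProgress() throws IOException {
        return pages.isEmpty() ? 1.0f : (float) current / pages.size();
    }

    @Override
    public void close() throws IOException { }
}

getRecordReader() would then return new PDFRecordReader(arg0, arg1), and WordCountMapper would have to implement Mapper<Text, Text, Text, IntWritable> so that its input key type matches what this reader emits.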