Spark throws a "NoClassDefFoundError" even though the jar shows the class exists

Posted: 2017-07-10 01:38:20

Tags: java apache-spark ctakes

I am getting a NoClassDefFoundError even though 7Zip shows that the jar containing the class is present inside the uber-jar I submit. I submit the job as follows:

spark-submit  --class org.dia.red.ctakes.spark.CtakesSparkMain target/spark-ctakes-0.1-job.jar
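
To double-check what spark-submit actually sees, the jar's contents can be listed directly (a quick diagnostic using the same jar path as the command above):

jar tf target/spark-ctakes-0.1-job.jar | grep uima

If this shows nested .jar files (e.g. a bundled uimaj-core jar) rather than unpacked org/apache/uima/... .class entries, the classes are not directly on spark-submit's classpath.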

The error thrown is:

Exception in thread "main" java.lang.NoClassDefFoundError: org/apache/uima/cas/FSIndex
        at org.dia.red.ctakes.spark.CtakesSparkMain.main(CtakesSparkMain.java:50)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:743)
        at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:187)
        at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:212)
        at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:126)
        at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
Caused by: java.lang.ClassNotFoundException: org.apache.uima.cas.FSIndex
        at java.net.URLClassLoader.findClass(URLClassLoader.java:381)
        at java.lang.ClassLoader.loadClass(ClassLoader.java:424)
        at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
        ... 10 more

The CtakesSparkMain class below calls the CtakesFunction class:

package org.dia.red.ctakes.spark;

import java.util.List;
import java.io.PrintWriter;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

import org.apache.uima.jcas.cas.FSArray;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.storage.StorageLevel;
import org.json.JSONObject;


public class CtakesSparkMain {

    /**
     * @param args
     */
    public static void main(String[] args) throws Exception {


        SparkConf conf = new SparkConf().setAppName("ctakes");
        JavaSparkContext sc = new JavaSparkContext(conf);

        JavaRDD<String> lines = sc.textFile("/mnt/d/metistream/ctakes-streaming/SparkStreamingCTK/testdata100.txt").map(new CtakesFunction());

        String first = lines.take(2).get(0);
        PrintWriter out = new PrintWriter("/mnt/d/metistream/ctakes-streaming/SparkStreamingCTK/test_outputs/output.txt");
        out.println(first);
        out.close();
        sc.close();

    }
}

CtakesFunction:

package org.dia.red.ctakes.spark;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;

import org.apache.ctakes.typesystem.type.refsem.OntologyConcept;
import org.apache.ctakes.typesystem.type.textsem.*;
import org.apache.uima.UIMAException;
import org.apache.uima.cas.FSIndex;
import org.apache.uima.cas.Type;

import org.apache.uima.jcas.JCas;
import org.apache.uima.analysis_engine.AnalysisEngineDescription;
import org.apache.uima.cas.impl.XmiCasSerializer;
import org.apache.uima.fit.factory.JCasFactory;
import org.apache.uima.fit.pipeline.SimplePipeline;

import org.apache.uima.jcas.cas.FSArray;
import org.apache.uima.util.XMLSerializer;
import org.apache.spark.api.java.function.Function;

import it.cnr.iac.CTAKESClinicalPipelineFactory;
import org.json.*;

/**
 * @author Selina Chu, Michael Starch, and Giuseppe Totaro
 *
 */

public class CtakesFunction implements Function<String, String> {

    transient JCas jcas = null;
    transient AnalysisEngineDescription aed = null;

    private void setup() throws UIMAException {

        System.setProperty("ctakes.umlsuser", "");
        System.setProperty("ctakes.umlspw", "");
        this.jcas = JCasFactory.createJCas();
        this.aed = CTAKESClinicalPipelineFactory.getDefaultPipeline();

    }

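    // jcas and aed are transient, so they arrive null when this function is
    // deserialized on an executor; readObject() rebuilds them via setup().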
    private void readObject(ObjectInputStream in) {
        try {
            in.defaultReadObject();
            this.setup();
        } catch (ClassNotFoundException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        } catch (UIMAException e) {
            e.printStackTrace();
        }
    }

    @Override
    public String call(String paragraph) throws Exception {

        this.jcas.setDocumentText(paragraph);

        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        SimplePipeline.runPipeline(this.jcas, this.aed);
        FSIndex index = this.jcas.getAnnotationIndex(IdentifiedAnnotation.type);
        Iterator iter = index.iterator();


        JSONArray annotationsArray = new JSONArray();
        JSONObject allAnnotations = new JSONObject();

        ArrayList<String> types = new ArrayList<String>();

        types.add("org.apache.ctakes.typesystem.type.textsem.SignSymptomMention");
        types.add("org.apache.ctakes.typesystem.type.textsem.DiseaseDisorderMention");
        types.add("org.apache.ctakes.typesystem.type.textsem.AnatomicalSiteMention");
        types.add("org.apache.ctakes.typesystem.type.textsem.ProcedureMention");
        types.add("import org.apache.ctakes.typesystem.type.textsem.MedicationMention");

        String type;
        String[] splitType;
        FSArray snomedArray;
        ArrayList<String> snomedStringArray = new ArrayList<String>();

        while (iter.hasNext()){
            IdentifiedAnnotation annotation = (IdentifiedAnnotation)iter.next();
            type = annotation.getType().toString();
            if (types.contains(type)){
                JSONObject annotations  = new JSONObject();

                splitType = type.split("[.]");
                annotations.put("id", annotation.getId());
                annotations.put("subject", annotation.getSubject());
                annotations.put("type", splitType[splitType.length - 1]);
                annotations.put("text", annotation.getCoveredText());
                annotations.put("polarity", annotation.getPolarity());
                annotations.put("confidence", annotation.getConfidence());

                snomedArray = annotation.getOntologyConceptArr();
                for (int i = 0; i < snomedArray.size(); i++){
                    snomedStringArray.add(((OntologyConcept)snomedArray.get(i)).getCode());
                }
                annotations.put("snomed_codes", snomedStringArray);
                snomedStringArray.clear();
                annotationsArray.put(annotations);
            }

        }

        allAnnotations.put("Annotations", annotationsArray);
        this.jcas.reset();
        return allAnnotations.toString();
    }
}

I have tried modifying the repository at https://github.com/selinachu/SparkStreamingCTK to use plain Spark (and Spark 2.0) instead of Spark Streaming, but I haven't been able to resolve this.

2 Answers:

Answer 0 (score: 2)

This is because what Maven generates for this project is not really an uber-jar. spark-submit cannot load classes from a jar nested inside another jar; that would require a special class loader. The correct approach is to explode all the dependency jars so that every contained class sits directly inside the uber-jar, which is what the maven-shade-plugin does: https://maven.apache.org/plugins/maven-shade-plugin/

So you have to change the pom.xml file to generate a proper uber-jar for this project.
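
A minimal sketch of the relevant plugin section (the version and the transformer/filter choices are illustrative, not taken from this project's actual pom.xml):

<plugin>
  <groupId>org.apache.maven.plugins</groupId>
  <artifactId>maven-shade-plugin</artifactId>
  <version>2.4.3</version>
  <executions>
    <execution>
      <phase>package</phase>
      <goals>
        <goal>shade</goal>
      </goals>
      <configuration>
        <transformers>
          <!-- merge META-INF/services files so service discovery keeps working -->
          <transformer implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
        </transformers>
        <filters>
          <!-- drop signature files that would invalidate the merged jar -->
          <filter>
            <artifact>*:*</artifact>
            <excludes>
              <exclude>META-INF/*.SF</exclude>
              <exclude>META-INF/*.DSA</exclude>
              <exclude>META-INF/*.RSA</exclude>
            </excludes>
          </filter>
        </filters>
      </configuration>
    </execution>
  </executions>
</plugin>

With this in place, mvn package unpacks dependency classes to the top level of the jar, which is the layout spark-submit's URLClassLoader expects.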

Answer 1 (score: 1)

Inspired by YuGagarin's feedback, I used sbt-assembly to build an uber-jar of cTAKES myself. Compiling everything into one "true" fat jar resolved the problem above.
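
For reference, a minimal sbt-assembly setup looks roughly like this (plugin version, Spark version, and merge rules are illustrative assumptions, not my exact build files):

// project/plugins.sbt
addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.5")

// build.sbt
// Spark is provided by the cluster, so keep it out of the fat jar
libraryDependencies += "org.apache.spark" %% "spark-core" % "2.0.0" % "provided"

assemblyMergeStrategy in assembly := {
  case PathList("META-INF", "MANIFEST.MF") => MergeStrategy.discard
  case _                                   => MergeStrategy.first
}

Running sbt assembly then produces a single fat jar with every class unpacked at the top level.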

However, I should point out that I am still working through some residual issues between cTAKES and Spark.