java.lang.NoClassDefFoundError: org/apache/commons/logging/LogFactory in Hadoop 1.2.1 (k-means algorithm)

Date: 2015-03-30 04:17:07

Tags: eclipse hadoop

I am trying to run the example from http://codingwiththomas.blogspot.kr/2011/05/k-means-clustering-with-mapreduce.html, but it fails with the following error:

 log4j:WARN Error during default initialization
java.lang.NoClassDefFoundError: org/apache/log4j/AppenderSkeleton
    at java.lang.ClassLoader.findBootstrapClass(Native Method)
    at java.lang.ClassLoader.findBootstrapClassOrNull(ClassLoader.java:1070)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:414)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:412)
    at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
    at java.lang.Class.forName0(Native Method)
    at java.lang.Class.forName(Class.java:190)
    at org.apache.log4j.helpers.Loader.loadClass(Loader.java:179)
    at org.apache.log4j.helpers.OptionConverter.instantiateByClassName(OptionConverter.java:320)
    at org.apache.log4j.helpers.OptionConverter.instantiateByKey(OptionConverter.java:121)
    at org.apache.log4j.PropertyConfigurator.parseAppender(PropertyConfigurator.java:664)
    at org.apache.log4j.PropertyConfigurator.parseCategory(PropertyConfigurator.java:647)
    at org.apache.log4j.PropertyConfigurator.configureRootCategory(PropertyConfigurator.java:544)
    at org.apache.log4j.PropertyConfigurator.doConfigure(PropertyConfigurator.java:440)
    at org.apache.log4j.PropertyConfigurator.doConfigure(PropertyConfigurator.java:476)
    at org.apache.log4j.helpers.OptionConverter.selectAndConfigure(OptionConverter.java:471)
    at org.apache.log4j.LogManager.<clinit>(LogManager.java:125)
    at org.apache.log4j.Logger.getLogger(Logger.java:105)
    at org.apache.commons.logging.impl.Log4JLogger.getLogger(Log4JLogger.java:289)
    at org.apache.commons.logging.impl.Log4JLogger.<init>(Log4JLogger.java:109)
    at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
    at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
    at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
    at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
    at org.apache.commons.logging.impl.LogFactoryImpl.createLogFromClass(LogFactoryImpl.java:1116)
    at org.apache.commons.logging.impl.LogFactoryImpl.discoverLogImplementation(LogFactoryImpl.java:914)
    at org.apache.commons.logging.impl.LogFactoryImpl.newInstance(LogFactoryImpl.java:604)
    at org.apache.commons.logging.impl.LogFactoryImpl.getInstance(LogFactoryImpl.java:336)
    at org.apache.commons.logging.impl.LogFactoryImpl.getInstance(LogFactoryImpl.java:310)
    at org.apache.commons.logging.LogFactory.getLog(LogFactory.java:685)
    at com.clustering.mapreduce.KMeansClusteringJob.<clinit>(KMeansClusteringJob.java:22)

Exception in thread "main" java.lang.NoClassDefFoundError: org/apache/commons/logging/LogFactory
    at org.apache.hadoop.conf.Configuration.<clinit>(Configuration.java:146)
    at com.clustering.mapreduce.KMeansClusteringJob.main(KMeansClusteringJob.java:28)

This happens even though I have added all the jar files from hadoop/lib, as well as hadoop-core-1.2.1.jar, to my project. My code:

public class KMeansClusteringJob {

private static final Log LOG = LogFactory.getLog(KMeansClusteringJob.class);

public static void main(String[] args) throws IOException,
        InterruptedException, ClassNotFoundException {

    int iteration = 1;
    Configuration conf = new Configuration();
    conf.set("num.iteration", iteration + "");

    Path in = new Path("files/clustering/import/data");
    Path center = new Path("files/clustering/import/center/cen.seq");
    conf.set("centroid.path", center.toString());
    Path out = new Path("files/clustering/depth_1");

    Job job = new Job(conf);
    job.setJobName("KMeans Clustering");

    job.setMapperClass(KMeansMapper.class);
    job.setReducerClass(KMeansReducer.class);
    job.setJarByClass(KMeansMapper.class);

    SequenceFileInputFormat.addInputPath(job, in);
    FileSystem fs = FileSystem.get(conf);
    if (fs.exists(out))
        fs.delete(out, true);

    if (fs.exists(center))
        fs.delete(center, true);

    if (fs.exists(in))
        fs.delete(in, true);

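    // write the two initial cluster centers, (1, 1) and (5, 5), into the centroid SequenceFile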
    final SequenceFile.Writer centerWriter = SequenceFile.createWriter(fs,
            conf, center, ClusterCenter.class, IntWritable.class);
    final IntWritable value = new IntWritable(0);
    centerWriter.append(new ClusterCenter(new Vector(1, 1)), value);
    centerWriter.append(new ClusterCenter(new Vector(5, 5)), value);
    centerWriter.close();

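    // write the sample input vectors, each keyed by a placeholder center (0, 0)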
    final SequenceFile.Writer dataWriter = SequenceFile.createWriter(fs,
            conf, in, ClusterCenter.class, Vector.class);
    dataWriter.append(new ClusterCenter(new Vector(0, 0)), new Vector(1, 2));
    dataWriter.append(new ClusterCenter(new Vector(0, 0)), new Vector(16, 3));
    dataWriter.append(new ClusterCenter(new Vector(0, 0)), new Vector(3, 3));
    dataWriter.append(new ClusterCenter(new Vector(0, 0)), new Vector(2, 2));
    dataWriter.append(new ClusterCenter(new Vector(0, 0)), new Vector(2, 3));
    dataWriter.append(new ClusterCenter(new Vector(0, 0)), new Vector(25, 1));
    dataWriter.append(new ClusterCenter(new Vector(0, 0)), new Vector(7, 6));
    dataWriter.append(new ClusterCenter(new Vector(0, 0)), new Vector(6, 5));
    dataWriter.append(new ClusterCenter(new Vector(0, 0)), new Vector(-1, -23));
    dataWriter.close();

    SequenceFileOutputFormat.setOutputPath(job, out);
    job.setInputFormatClass(SequenceFileInputFormat.class);
    job.setOutputFormatClass(SequenceFileOutputFormat.class);

    job.setOutputKeyClass(ClusterCenter.class);
    job.setOutputValueClass(Vector.class);

    job.waitForCompletion(true);

    long counter = job.getCounters()
            .findCounter(KMeansReducer.Counter.CONVERGED).getValue();
    iteration++;
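    // keep chaining jobs while the reducer's CONVERGED counter is non-zero,
    // feeding each iteration with the output directory of the previous one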
    while (counter > 0) {
        conf = new Configuration();
        conf.set("centroid.path", center.toString());
        conf.set("num.iteration", iteration + "");
        job = new Job(conf);
        job.setJobName("KMeans Clustering " + iteration);

        job.setMapperClass(KMeansMapper.class);
        job.setReducerClass(KMeansReducer.class);
        job.setJarByClass(KMeansMapper.class);

        in = new Path("files/clustering/depth_" + (iteration - 1) + "/");
        out = new Path("files/clustering/depth_" + iteration);

        SequenceFileInputFormat.addInputPath(job, in);
        if (fs.exists(out))
            fs.delete(out, true);

        SequenceFileOutputFormat.setOutputPath(job, out);
        job.setInputFormatClass(SequenceFileInputFormat.class);
        job.setOutputFormatClass(SequenceFileOutputFormat.class);
        job.setOutputKeyClass(ClusterCenter.class);
        job.setOutputValueClass(Vector.class);

        job.waitForCompletion(true);
        iteration++;
        counter = job.getCounters()
                .findCounter(KMeansReducer.Counter.CONVERGED).getValue();
    }

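    // read the SequenceFile output of the final iteration and log each center/vector pair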
    Path result = new Path("files/clustering/depth_" + (iteration - 1)
            + "/");

    FileStatus[] stati = fs.listStatus(result);
    for (FileStatus status : stati) {
        if (!status.isDir()) {
            Path path = status.getPath();
            LOG.info("FOUND " + path.toString());
            SequenceFile.Reader reader = new SequenceFile.Reader(fs, path,
                    conf);
            ClusterCenter key = new ClusterCenter();
            Vector v = new Vector();
            while (reader.next(key, v)) {
                LOG.info(key + " / " + v);
            }
            reader.close();
        }
    }
}
}

Do I need to add more jars to the application?

1 Answer:

Answer 0 (score: 0):

The error clearly shows that the Apache commons-logging jar is missing at runtime. Please add that JAR to your classpath.
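In the stack trace above, two classes actually fail to load: org.apache.commons.logging.LogFactory (from commons-logging) and org.apache.log4j.AppenderSkeleton (from log4j). Both jars normally ship in the lib/ directory of the Hadoop 1.2.1 distribution, so adding them from there to the Eclipse build path should be enough. As a quick, illustrative way to verify the run configuration after adding them (the ClasspathCheck class below is only a sketch, not part of the original project):

public class ClasspathCheck {

    public static void main(String[] args) {
        // class names copied from the stack trace in the question
        String[] required = { "org.apache.commons.logging.LogFactory", // commons-logging-*.jar
                "org.apache.log4j.AppenderSkeleton" }; // log4j-*.jar

        for (String name : required) {
            try {
                Class.forName(name);
                System.out.println("FOUND   " + name);
            } catch (ClassNotFoundException e) {
                System.out.println("MISSING " + name + " - add its jar to the classpath");
            }
        }
    }
}

If both classes are reported as FOUND when run with the same classpath as KMeansClusteringJob, the NoClassDefFoundError should no longer occur.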