Errors when running a Hadoop program on a cluster

Time: 2013-04-03 05:41:58

Tags: java linux unix hadoop mapreduce

I wrote the following Hadoop program to perform parallel indexing of files:

package org.myorg;

import java.io.*;
import java.util.*;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.*;

public class ParallelIndexation {
    //public static native long Traveser(String Path);

    //public static native void Configure(String Path);

    //static {
    //  System.loadLibrary("nativelib");
    //}
    public static class Map extends MapReduceBase implements
            Mapper<LongWritable, Text, Text, LongWritable> {
        private final static LongWritable zero = new LongWritable(0);
        private Text word = new Text();

        public void map(LongWritable key, Text value,
                OutputCollector<Text, LongWritable> output, Reporter reporter)
                throws IOException {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(conf);
            Path localPath = new Path("/export/hadoop-1.0.1/bin/input/paths.txt");
            Path hdfsPath=new Path("hdfs://192.168.1.8:7000/user/hadoop/paths.txt");
            Path localPath1 = new Path("/usr/countcomputers.txt");                
            Path hdfsPath1=new Path("hdfs://192.168.1.8:7000/user/hadoop/countcomputers.txt");
            if (!fs.exists(hdfsPath))
            {
                fs.copyFromLocalFile(localPath, hdfsPath);
            }
            if (!fs.exists(hdfsPath1))
            {
                fs.copyFromLocalFile(localPath1, hdfsPath1);
            }
            FSDataInputStream in = fs.open(hdfsPath);
            BufferedReader br = new BufferedReader(new InputStreamReader(in));
            String line = br.readLine();
            // String line = value.toString();
            BufferedReader br1=new BufferedReader(new InputStreamReader(fs.open(hdfsPath1)));
            int CountComputers;
            /* FileInputStream fstream = new FileInputStream(
                    "/usr/countcomputers.txt");

            BufferedReader br = new BufferedReader(new InputStreamReader(fstream)); */
            String result=br1.readLine();
            CountComputers=Integer.parseInt(result);
            // in.close();
            // fstream.close();
            ArrayList<String> paths = new ArrayList<String>();
            StringTokenizer tokenizer = new StringTokenizer(line, "|");
            while (tokenizer.hasMoreTokens()) {
                paths.add(tokenizer.nextToken());
            }
            String[] ConcatPaths = new String[CountComputers];
            int NumberOfElementConcatPaths = 0;
            if (paths.size() % CountComputers == 0) {
                for (int i = 0; i < CountComputers; i++) {
                    ConcatPaths[i] = paths.get(NumberOfElementConcatPaths);
                    NumberOfElementConcatPaths += paths.size() / CountComputers;
                    for (int j = 1; j < paths.size() / CountComputers; j++) {
                        ConcatPaths[i] += "\n"
                                + paths.get(i * paths.size() / CountComputers
                                        + j);
                    }
                }
            } else {
                NumberOfElementConcatPaths = 0;
                for (int i = 0; i < paths.size() % CountComputers; i++) {
                    ConcatPaths[i] = paths.get(NumberOfElementConcatPaths);
                    NumberOfElementConcatPaths += paths.size() / CountComputers
                            + 1;
                    for (int j = 1; j < paths.size() / CountComputers + 1; j++) {
                        ConcatPaths[i] += "\n"
                                + paths.get(i
                                        * (paths.size() / CountComputers + 1)
                                        + j);
                    }
                }
                for (int k = paths.size() % CountComputers; k < CountComputers; k++) {
                    ConcatPaths[k] = paths.get(NumberOfElementConcatPaths);
                    NumberOfElementConcatPaths += paths.size() / CountComputers;
                    for (int j = 1; j < paths.size() / CountComputers; j++) {
                        ConcatPaths[k] += "\n"
                                + paths.get((k - paths.size() % CountComputers)
                                        * paths.size() / CountComputers
                                        + paths.size() % CountComputers
                                        * (paths.size() / CountComputers + 1)
                                        + j);
                    }
                }
            }
            for (int i = 0; i < ConcatPaths.length; i++) {
                word.set(ConcatPaths[i]);
                output.collect(word, zero);
            }
            in.close();
        }
    }

    public static class Reduce extends MapReduceBase implements
            Reducer<Text, LongWritable, Text, LongWritable> {
        public native long Traveser(String Path);

        public native void Configure(String Path);

        public void reduce(Text key, Iterator<LongWritable> value,
                OutputCollector<Text, LongWritable> output, Reporter reporter)
                throws IOException {
            long count=0;
            String line = key.toString();
            ArrayList<String> ProcessedPaths = new ArrayList<String>();
            StringTokenizer tokenizer = new StringTokenizer(line, "\n");
            while (tokenizer.hasMoreTokens()) {
                ProcessedPaths.add(tokenizer.nextToken());
            }
            Configure("/export/hadoop-1.0.1/bin/nsindexer.conf");
            for (int i = 0; i < ProcessedPaths.size(); i++) {
                count = Traveser(ProcessedPaths.get(i));
            }
            output.collect(key, new LongWritable(count));
        }

        static {
            System.loadLibrary("nativelib");
        }
    }

    public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf(ParallelIndexation.class);
        conf.setJobName("parallelindexation");
        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(LongWritable.class);
        conf.setMapperClass(Map.class);
        conf.setCombinerClass(Reduce.class);
        conf.setReducerClass(Reduce.class);
        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);
        FileInputFormat.setInputPaths(conf, new Path(args[0]));
        FileOutputFormat.setOutputPath(conf, new Path(args[1]));
        JobClient.runJob(conf);
    }
}
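
To make the splitting in map() easier to follow: it reads the '|'-separated path list from paths.txt and concatenates it into CountComputers newline-separated groups (one group per emitted key), with the first paths.size() % CountComputers groups receiving one extra path. The standalone sketch below is my own illustration of that arithmetic with made-up sample data; it is not part of the submitted job.

import java.util.*;

public class GroupingSketch {
    // Hypothetical sample data: 7 paths split across 3 "computers" gives
    // groups of 3, 2 and 2 paths, matching the arithmetic in map() above.
    public static void main(String[] args) {
        List<String> paths = Arrays.asList("/a", "/b", "/c", "/d", "/e", "/f", "/g");
        int countComputers = 3;
        String[] concatPaths = new String[countComputers];
        int index = 0;
        for (int i = 0; i < countComputers; i++) {
            // the first (paths.size() % countComputers) groups get one extra path
            int groupSize = paths.size() / countComputers
                    + (i < paths.size() % countComputers ? 1 : 0);
            StringBuilder sb = new StringBuilder();
            for (int j = 0; j < groupSize; j++) {
                if (j > 0) sb.append('\n');
                sb.append(paths.get(index++));
            }
            concatPaths[i] = sb.toString();
            System.out.println("group " + i + ": " + concatPaths[i].replace("\n", " "));
        }
    }
}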

I tried to launch it on a cluster of 4 machines, on only 2 of which the DataNode and TaskTracker daemons are running. Before starting the program I copied the file /export/hadoop-1.0.1/bin/input/paths.txt to hdfs://192.168.1.8:7000/user/hadoop/paths.txt and the file /usr/countcomputers.txt to hdfs://192.168.1.8:7000/user/hadoop/countcomputers.txt with the commands

./hadoop fs -put /export/hadoop-1.0.1/bin/input/paths.txt /user/hadoop/paths.txt
./hadoop fs -put /usr/countcomputers.txt /user/hadoop/countcomputers.txt

I also created that folder with the command

./hadoop fs -mkdir /export/hadoop-1.0.1/bin/input
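
For reference only, the same staging can be done through the HDFS Java API that the mapper above already uses. This is a minimal sketch, not part of the original setup; the class name StageInput is hypothetical, and the namenode address and file locations are taken from the commands and code above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class StageInput {
    public static void main(String[] args) throws Exception {
        // Assumes the namenode address used in the job code above.
        Configuration conf = new Configuration();
        conf.set("fs.default.name", "hdfs://192.168.1.8:7000");
        FileSystem fs = FileSystem.get(conf);
        // equivalent of "hadoop fs -mkdir" for the job input directory
        fs.mkdirs(new Path("/export/hadoop-1.0.1/bin/input"));
        // equivalent of the two "hadoop fs -put" commands
        fs.copyFromLocalFile(new Path("/export/hadoop-1.0.1/bin/input/paths.txt"),
                new Path("/user/hadoop/paths.txt"));
        fs.copyFromLocalFile(new Path("/usr/countcomputers.txt"),
                new Path("/user/hadoop/countcomputers.txt"));
        fs.close();
    }
}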

(I started all the daemons on the cluster machines myself.) As a result of executing the command

./hadoop jar ParallelIndexation.jar org.myorg.ParallelIndexation /export/hadoop-1.0.1/bin/input /export/hadoop-1.0.1/bin/output 1> resultofexecution.txt 2>&1

I received the following error messages:

13/04/02 13:15:48 WARN mapred.JobClient: Use GenericOptionsParser for parsing the arguments. Applications should implement Tool for the same.
13/04/02 13:15:48 INFO mapred.FileInputFormat: Total input paths to process : 0
13/04/02 13:15:49 INFO mapred.JobClient: Running job: job_201304012201_0015
13/04/02 13:15:50 INFO mapred.JobClient:  map 0% reduce 0%
13/04/02 13:15:58 INFO mapred.JobClient: Task Id : attempt_201304012201_0015_m_000001_0, Status : FAILED
java.lang.Throwable: Child Error
    at org.apache.hadoop.mapred.TaskRunner.run(TaskRunner.java:271)
Caused by: java.io.IOException: Task process exit with nonzero status of 126.
    at org.apache.hadoop.mapred.TaskRunner.run(TaskRunner.java:258)

13/04/02 13:16:33 WARN mapred.JobClient: Error reading task outputmyhost2
13/04/02 13:16:33 WARN mapred.JobClient: Error reading task outputmyhost2
13/04/02 13:16:34 INFO mapred.JobClient: Task Id : attempt_201304012201_0015_r_000002_0, Status : FAILED
java.lang.Throwable: Child Error
    at org.apache.hadoop.mapred.TaskRunner.run(TaskRunner.java:271)
Caused by: java.io.IOException: Task process exit with nonzero status of 126.
    at org.apache.hadoop.mapred.TaskRunner.run(TaskRunner.java:258)

13/04/02 13:16:34 WARN mapred.JobClient: Error reading task outputmyhost2
13/04/02 13:16:34 WARN mapred.JobClient: Error reading task outputmyhost2
13/04/02 13:16:34 INFO mapred.JobClient: Task Id : attempt_201304012201_0015_m_000001_1, Status : FAILED
java.lang.Throwable: Child Error
    at org.apache.hadoop.mapred.TaskRunner.run(TaskRunner.java:271)
Caused by: java.io.IOException: Task process exit with nonzero status of 126.
    at org.apache.hadoop.mapred.TaskRunner.run(TaskRunner.java:258)

13/04/02 13:16:34 WARN mapred.JobClient: Error reading task outputmyhost3
13/04/02 13:16:34 WARN mapred.JobClient: Error reading task outputmyhost3
13/04/02 13:16:34 INFO mapred.JobClient: Task Id : attempt_201304012201_0015_m_000001_2, Status : FAILED
java.lang.Throwable: Child Error
    at org.apache.hadoop.mapred.TaskRunner.run(TaskRunner.java:271)
Caused by: java.io.IOException: Task process exit with nonzero status of 126.
    at org.apache.hadoop.mapred.TaskRunner.run(TaskRunner.java:258)

13/04/02 13:16:34 WARN mapred.JobClient: Error reading task outputmyhost3
13/04/02 13:16:34 WARN mapred.JobClient: Error reading task outputmyhost3
13/04/02 13:16:34 INFO mapred.JobClient: Task Id : attempt_201304012201_0015_m_000000_0, Status : FAILED
java.lang.Throwable: Child Error
    at org.apache.hadoop.mapred.TaskRunner.run(TaskRunner.java:271)
Caused by: java.io.IOException: Task process exit with nonzero status of 126.
    at org.apache.hadoop.mapred.TaskRunner.run(TaskRunner.java:258)

13/04/02 13:16:34 WARN mapred.JobClient: Error reading task outputmyhost3
13/04/02 13:16:34 WARN mapred.JobClient: Error reading task outputmyhost3
13/04/02 13:16:35 INFO mapred.JobClient: Task Id : attempt_201304012201_0015_r_000001_0, Status : FAILED
java.lang.Throwable: Child Error
    at org.apache.hadoop.mapred.TaskRunner.run(TaskRunner.java:271)
Caused by: java.io.IOException: Task process exit with nonzero status of 126.
    at org.apache.hadoop.mapred.TaskRunner.run(TaskRunner.java:258)

13/04/02 13:16:35 WARN mapred.JobClient: Error reading task outputmyhost3
13/04/02 13:16:35 WARN mapred.JobClient: Error reading task outputmyhost3
13/04/02 13:16:42 INFO mapred.JobClient: Task Id : attempt_201304012201_0015_m_000000_1, Status : FAILED
java.lang.Throwable: Child Error
    at org.apache.hadoop.mapred.TaskRunner.run(TaskRunner.java:271)
Caused by: java.io.IOException: Task process exit with nonzero status of 126.
    at org.apache.hadoop.mapred.TaskRunner.run(TaskRunner.java:258)

13/04/02 13:16:42 WARN mapred.JobClient: Error reading task outputmyhost2
13/04/02 13:16:42 WARN mapred.JobClient: Error reading task outputmyhost2
13/04/02 13:16:48 INFO mapred.JobClient: Task Id : attempt_201304012201_0015_m_000000_2, Status : FAILED
java.lang.Throwable: Child Error
    at org.apache.hadoop.mapred.TaskRunner.run(TaskRunner.java:271)
Caused by: java.io.IOException: Task process exit with nonzero status of 126.
    at org.apache.hadoop.mapred.TaskRunner.run(TaskRunner.java:258)

13/04/02 13:16:48 WARN mapred.JobClient: Error reading task outputmyhost2
13/04/02 13:16:48 WARN mapred.JobClient: Error reading task outputmyhost2
13/04/02 13:16:54 INFO mapred.JobClient: Job complete: job_201304012201_0015
13/04/02 13:16:54 INFO mapred.JobClient: Counters: 4
13/04/02 13:16:54 INFO mapred.JobClient:   Job Counters 
13/04/02 13:16:54 INFO mapred.JobClient:     SLOTS_MILLIS_MAPS=30679
13/04/02 13:16:54 INFO mapred.JobClient:     Total time spent by all reduces waiting after reserving slots (ms)=0
13/04/02 13:16:54 INFO mapred.JobClient:     Total time spent by all maps waiting after reserving slots (ms)=0
13/04/02 13:16:54 INFO mapred.JobClient:     SLOTS_MILLIS_REDUCES=8131
13/04/02 13:16:54 INFO mapred.JobClient: Job Failed: JobCleanup Task Failure, Task: task_201304012201_0015_m_000000
Exception in thread "main" java.io.IOException: Job failed!
    at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:1265)
    at org.myorg.ParallelIndexation.main(ParallelIndexation.java:146)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:601)
    at org.apache.hadoop.util.RunJar.main(RunJar.java:156)

Please suggest how to eliminate these errors. Below are the contents of the file hadoop-hadoop-tasktracker-myhost2.log (the file hadoop-hadoop-tasktracker-myhost3.log is similar):

hadoop-hadoop-tasktracker-myhost2.log

2013-04-07 19:29:25,151 INFO org.apache.hadoop.mapred.TaskTracker: LaunchTaskAction (registerTask): attempt_201304070459_0003_m_000001_1 task's state:UNASSIGNED
2013-04-07 19:29:25,151 INFO org.apache.hadoop.mapred.TaskTracker: Trying to launch : attempt_201304070459_0003_m_000001_1 which needs 1 slots
2013-04-07 19:29:25,151 INFO org.apache.hadoop.mapred.TaskTracker: In TaskLauncher, current free slots : 2 and trying to launch attempt_201304070459_0003_m_000001_1 which needs 1 slots
2013-04-07 19:29:25,239 INFO org.apache.hadoop.mapred.JobLocalizer: Initializing user hadoop on this TT.
2013-04-07 19:29:25,509 INFO org.apache.hadoop.mapred.JvmManager: In JvmRunner constructed JVM ID: jvm_201304070459_0003_m_2012583632
2013-04-07 19:29:25,510 INFO org.apache.hadoop.mapred.JvmManager: JVM Runner jvm_201304070459_0003_m_2012583632 spawned.
2013-04-07 19:29:25,512 INFO org.apache.hadoop.mapred.TaskController: Writing commands to /tmp/hadoop-hadoop/mapred/local/ttprivate/taskTracker/hadoop/jobcache/job_201304070459_0003/attempt_201304070459_0003_m_000001_1/taskjvm.sh
2013-04-07 19:29:25,553 WARN org.apache.hadoop.mapred.DefaultTaskController: Exit code from task is : 126
2013-04-07 19:29:25,553 INFO org.apache.hadoop.mapred.DefaultTaskController: Output from DefaultTaskController's launchTask follows:
2013-04-07 19:29:25,553 INFO org.apache.hadoop.mapred.TaskController: 
2013-04-07 19:29:25,554 INFO org.apache.hadoop.mapred.JvmManager: JVM Not killed jvm_201304070459_0003_m_2012583632 but just removed
2013-04-07 19:29:25,554 INFO org.apache.hadoop.mapred.JvmManager: JVM : jvm_201304070459_0003_m_2012583632 exited with exit code 126. Number of tasks it ran: 0
2013-04-07 19:29:25,555 WARN org.apache.hadoop.mapred.TaskRunner: attempt_201304070459_0003_m_000001_1 : Child Error
java.io.IOException: Task process exit with nonzero status of 126.
    at org.apache.hadoop.mapred.TaskRunner.run(TaskRunner.java:258)
2013-04-07 19:29:29,555 INFO org.apache.hadoop.mapred.TaskTracker: addFreeSlot : current free slots : 2
2013-04-07 19:29:31,172 INFO org.apache.hadoop.mapred.TaskTracker: LaunchTaskAction (registerTask): attempt_201304070459_0003_m_000001_2 task's state:UNASSIGNED
2013-04-07 19:29:31,173 INFO org.apache.hadoop.mapred.TaskTracker: Trying to launch : attempt_201304070459_0003_m_000001_2 which needs 1 slots
2013-04-07 19:29:31,173 INFO org.apache.hadoop.mapred.TaskTracker: In TaskLauncher, current free slots : 2 and trying to launch attempt_201304070459_0003_m_000001_2 which needs 1 slots
2013-04-07 19:29:31,210 INFO org.apache.hadoop.mapred.JvmManager: In JvmRunner constructed JVM ID: jvm_201304070459_0003_m_1529309377
2013-04-07 19:29:31,212 INFO org.apache.hadoop.mapred.JvmManager: JVM Runner jvm_201304070459_0003_m_1529309377 spawned.
2013-04-07 19:29:31,217 INFO org.apache.hadoop.mapred.TaskController: Writing commands to /tmp/hadoop-hadoop/mapred/local/ttprivate/taskTracker/hadoop/jobcache/job_201304070459_0003/attempt_201304070459_0003_m_000001_2/taskjvm.sh
2013-04-07 19:29:31,278 WARN org.apache.hadoop.mapred.DefaultTaskController: Exit code from task is : 126
2013-04-07 19:29:31,279 INFO org.apache.hadoop.mapred.DefaultTaskController: Output from DefaultTaskController's launchTask follows:
2013-04-07 19:29:31,279 INFO org.apache.hadoop.mapred.TaskController: 
2013-04-07 19:29:31,280 INFO org.apache.hadoop.mapred.JvmManager: JVM Not killed jvm_201304070459_0003_m_1529309377 but just removed
2013-04-07 19:29:31,281 WARN org.apache.hadoop.mapred.TaskRunner: attempt_201304070459_0003_m_000001_2 : Child Error
java.io.IOException: Task process exit with nonzero status of 126.
    at org.apache.hadoop.mapred.TaskRunner.run(TaskRunner.java:258)
2013-04-07 19:29:31,281 INFO org.apache.hadoop.mapred.JvmManager: JVM : jvm_201304070459_0003_m_1529309377 exited with exit code 126. Number of tasks it ran: 0
2013-04-07 19:29:34,281 INFO org.apache.hadoop.mapred.TaskTracker: addFreeSlot : current free slots : 2
2013-04-07 19:29:37,186 INFO org.apache.hadoop.mapred.TaskTracker: LaunchTaskAction (registerTask): attempt_201304070459_0003_m_000001_3 task's state:UNASSIGNED
2013-04-07 19:29:37,186 INFO org.apache.hadoop.mapred.TaskTracker: Trying to launch : attempt_201304070459_0003_m_000001_3 which needs 1 slots
2013-04-07 19:29:37,187 INFO org.apache.hadoop.mapred.TaskTracker: In TaskLauncher, current free slots : 2 and trying to launch attempt_201304070459_0003_m_000001_3 which needs 1 slots
2013-04-07 19:29:37,214 INFO org.apache.hadoop.mapred.JvmManager: In JvmRunner constructed JVM ID: jvm_201304070459_0003_m_786434533
2013-04-07 19:29:37,215 INFO org.apache.hadoop.mapred.JvmManager: JVM Runner jvm_201304070459_0003_m_786434533 spawned.
2013-04-07 19:29:37,218 INFO org.apache.hadoop.mapred.TaskController: Writing commands to /tmp/hadoop-hadoop/mapred/local/ttprivate/taskTracker/hadoop/jobcache/job_201304070459_0003/attempt_201304070459_0003_m_000001_3/taskjvm.sh
2013-04-07 19:29:37,269 WARN org.apache.hadoop.mapred.DefaultTaskController: Exit code from task is : 126
2013-04-07 19:29:37,269 INFO org.apache.hadoop.mapred.DefaultTaskController: Output from DefaultTaskController's launchTask follows:
2013-04-07 19:29:37,270 INFO org.apache.hadoop.mapred.TaskController: 
2013-04-07 19:29:37,270 INFO org.apache.hadoop.mapred.JvmManager: JVM Not killed jvm_201304070459_0003_m_786434533 but just removed
2013-04-07 19:29:37,271 WARN org.apache.hadoop.mapred.TaskRunner: attempt_201304070459_0003_m_000001_3 : Child Error
java.io.IOException: Task process exit with nonzero status of 126.
    at org.apache.hadoop.mapred.TaskRunner.run(TaskRunner.java:258)
2013-04-07 19:29:37,272 INFO org.apache.hadoop.mapred.JvmManager: JVM : jvm_201304070459_0003_m_786434533 exited with exit code 126. Number of tasks it ran: 0
2013-04-07 19:29:41,271 INFO org.apache.hadoop.mapred.TaskTracker: addFreeSlot : current free slots : 2
2013-04-07 19:29:43,203 INFO org.apache.hadoop.mapred.TaskTracker: LaunchTaskAction (registerTask): attempt_201304070459_0003_m_000000_0 task's state:UNASSIGNED
2013-04-07 19:29:43,203 INFO org.apache.hadoop.mapred.TaskTracker: Trying to launch : attempt_201304070459_0003_m_000000_0 which needs 1 slots
2013-04-07 19:29:43,203 INFO org.apache.hadoop.mapred.TaskTracker: In TaskLauncher, current free slots : 2 and trying to launch attempt_201304070459_0003_m_000000_0 which needs 1 slots
2013-04-07 19:29:43,203 INFO org.apache.hadoop.mapred.TaskTracker: Received KillTaskAction for task: attempt_201304070459_0003_m_000001_3
2013-04-07 19:29:43,222 INFO org.apache.hadoop.mapred.JvmManager: In JvmRunner constructed JVM ID: jvm_201304070459_0003_m_79286369
2013-04-07 19:29:43,223 INFO org.apache.hadoop.mapred.JvmManager: JVM Runner jvm_201304070459_0003_m_79286369 spawned.
2013-04-07 19:29:43,225 INFO org.apache.hadoop.mapred.TaskController: Writing commands to /tmp/hadoop-hadoop/mapred/local/ttprivate/taskTracker/hadoop/jobcache/job_201304070459_0003/attempt_201304070459_0003_m_000000_0/taskjvm.sh
2013-04-07 19:29:43,266 WARN org.apache.hadoop.mapred.DefaultTaskController: Exit code from task is : 126
2013-04-07 19:29:43,266 INFO org.apache.hadoop.mapred.DefaultTaskController: Output from DefaultTaskController's launchTask follows:
2013-04-07 19:29:43,266 INFO org.apache.hadoop.mapred.TaskController: 
2013-04-07 19:29:43,267 INFO org.apache.hadoop.mapred.JvmManager: JVM Not killed jvm_201304070459_0003_m_79286369 but just removed
2013-04-07 19:29:43,267 WARN org.apache.hadoop.mapred.TaskRunner: attempt_201304070459_0003_m_000000_0 : Child Error
java.io.IOException: Task process exit with nonzero status of 126.
    at org.apache.hadoop.mapred.TaskRunner.run(TaskRunner.java:258)
2013-04-07 19:29:43,268 INFO org.apache.hadoop.mapred.JvmManager: JVM : jvm_201304070459_0003_m_79286369 exited with exit code 126. Number of tasks it ran: 0
2013-04-07 19:29:47,268 INFO org.apache.hadoop.mapred.TaskTracker: addFreeSlot : current free slots : 2
2013-04-07 19:29:49,219 INFO org.apache.hadoop.mapred.TaskTracker: LaunchTaskAction (registerTask): attempt_201304070459_0003_r_000001_0 task's state:UNASSIGNED
2013-04-07 19:29:49,219 INFO org.apache.hadoop.mapred.TaskTracker: Trying to launch : attempt_201304070459_0003_r_000001_0 which needs 1 slots
2013-04-07 19:29:49,219 INFO org.apache.hadoop.mapred.TaskTracker: In TaskLauncher, current free slots : 2 and trying to launch attempt_201304070459_0003_r_000001_0 which needs 1 slots
2013-04-07 19:29:49,239 INFO org.apache.hadoop.mapred.JvmManager: In JvmRunner constructed JVM ID: jvm_201304070459_0003_r_383744348
2013-04-07 19:29:49,240 INFO org.apache.hadoop.mapred.JvmManager: JVM Runner jvm_201304070459_0003_r_383744348 spawned.
2013-04-07 19:29:49,241 INFO org.apache.hadoop.mapred.TaskController: Writing commands to /tmp/hadoop-hadoop/mapred/local/ttprivate/taskTracker/hadoop/jobcache/job_201304070459_0003/attempt_201304070459_0003_r_000001_0/taskjvm.sh
2013-04-07 19:29:49,265 WARN org.apache.hadoop.mapred.DefaultTaskController: Exit code from task is : 126
2013-04-07 19:29:49,265 INFO org.apache.hadoop.mapred.DefaultTaskController: Output from DefaultTaskController's launchTask follows:
2013-04-07 19:29:49,266 INFO org.apache.hadoop.mapred.TaskController: 
2013-04-07 19:29:49,266 INFO org.apache.hadoop.mapred.JvmManager: JVM Not killed jvm_201304070459_0003_r_383744348 but just removed
2013-04-07 19:29:49,266 WARN org.apache.hadoop.mapred.TaskRunner: attempt_201304070459_0003_r_000001_0 : Child Error
java.io.IOException: Task process exit with nonzero status of 126.
    at org.apache.hadoop.mapred.TaskRunner.run(TaskRunner.java:258)
2013-04-07 19:29:49,267 INFO org.apache.hadoop.mapred.JvmManager: JVM : jvm_201304070459_0003_r_383744348 exited with exit code 126. Number of tasks it ran: 0
2013-04-07 19:29:52,267 INFO org.apache.hadoop.mapred.TaskTracker: addFreeSlot : current free slots : 2
2013-04-07 19:30:16,294 INFO org.apache.hadoop.mapred.TaskTracker: Received 'KillJobAction' for job: job_201304070459_0003
2013-04-07 19:30:16,299 INFO org.apache.hadoop.mapred.UserLogCleaner: Adding job_201304070459_0003 for user-log deletion with retainTimeStamp:1365474616294

0 Answers:

No answers yet.