DataNode READ_BLOCK or WRITE_BLOCK errors

Date: 2014-01-06 07:59:30

Tags: hadoop

I found the following errors in the DataNode logs. Has anyone run into the same problem? What causes these errors, and how can I fix them?

The error logs are as follows:

2014-01-05 00:14:40,589 ERROR org.apache.hadoop.hdfs.server.datanode.DataNode: l-hdps56.data.cn6:50010:DataXceiver error processing WRITE_BLOCK operation  src: /192.168.246.57:35455 dest: /192.168.246.75:50010
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:194)
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:213)
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:134)
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:109)
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:435)
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:693)
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:569)
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:115)
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:68)
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:221)
    at java.lang.Thread.run(Thread.java:744)

2014-01-05 02:50:52,552 ERROR org.apache.hadoop.hdfs.server.datanode.DataNode: l-hdps56.data.cn6:50010:DataXceiver error processing READ_BLOCK operation  src: /192.168.246.39:46693 dest: /192.168.246.75:50010
org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException: Replica not found for BP-1398136447-192.168.246.60-1386067202761:blk_1089253308_1099605945003
    at org.apache.hadoop.hdfs.server.datanode.BlockSender.getReplica(BlockSender.java:418)
    at org.apache.hadoop.hdfs.server.datanode.BlockSender.<init>(BlockSender.java:228)
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.readBlock(DataXceiver.java:327)
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opReadBlock(Receiver.java:101)
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:65)
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:221)
    at java.lang.Thread.run(Thread.java:744)
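For the ReplicaNotFoundException above, this is a minimal sketch of the check I plan to run, assuming the standard hdfs fsck tool is available; the path "/" and the output file /tmp/fsck.out are just placeholders:

# Report file and block health, including replica locations (output can be large).
hdfs fsck / -files -blocks -locations > /tmp/fsck.out

# List only the files that currently have corrupt or missing blocks.
hdfs fsck / -list-corruptfileblocks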


2014-01-05 07:39:30,939 ERROR org.apache.hadoop.hdfs.server.datanode.DataNode: DataNode{data=FSDataset{dirpath='[/data1/hadoop/dfs/current, /data2/hadoop/dfs/current, /data3/hadoop/dfs/current, /data4/hadoop/dfs/current, /data5/hadoop/dfs/current, /data6/hadoop/dfs/current, /data7/hadoop/dfs/current, /data8/hadoop/dfs/current, /data9/hadoop/dfs/current, /data10/hadoop/dfs/current, /data11/hadoop/dfs/current, /data12/hadoop/dfs/current]'}, localName='l-hdps56.data.cn6:50010', storageID='DS-1312530819-192.168.246.75-50010-1388062043867', xmitsInProgress=0}:Exception transfering block BP-1398136447-192.168.246.60-1386067202761:blk_1089401766_1099606093471 to mirror 192.168.242.51:50010: java.io.IOException: Connection reset by peer
2014-01-05 07:39:30,939 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: opWriteBlock BP-1398136447-192.168.246.60-1386067202761:blk_1089401766_1099606093471 received exception java.io.IOException: Connection reset by peer
2014-01-05 07:39:30,939 ERROR org.apache.hadoop.hdfs.server.datanode.DataNode: l-hdps56.data.cn6:50010:DataXceiver error processing WRITE_BLOCK operation  src: /192.168.246.75:44779 dest: /192.168.246.75:50010
java.io.IOException: Connection reset by peer
    at sun.nio.ch.FileDispatcherImpl.read0(Native Method)
    at sun.nio.ch.SocketDispatcher.read(SocketDispatcher.java:39)
    at sun.nio.ch.IOUtil.readIntoNativeBuffer(IOUtil.java:223)
    at sun.nio.ch.IOUtil.read(IOUtil.java:197)
    at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:379)
    at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57)
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:142)
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118)
    at java.io.FilterInputStream.read(FilterInputStream.java:83)
    at java.io.FilterInputStream.read(FilterInputStream.java:83)
    at org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed(PBHelper.java:1490)
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:511)
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:115)
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:68)
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:221)
    at java.lang.Thread.run(Thread.java:744)


2014-01-05 09:47:58,332 ERROR org.apache.hadoop.hdfs.server.datanode.DataNode: l-hdps56.data.cn6:50010:DataXceiver error processing READ_BLOCK operation  src: /192.168.246.40:48386 dest: /192.168.246.75:50010
java.net.SocketTimeoutException: 480000 millis timeout while waiting for channel to be ready for write. ch : java.nio.channels.SocketChannel[connected local=/192.168.246.75:50010 remote=/192.168.246.40:48386]
    at org.apache.hadoop.net.SocketIOWithTimeout.waitForIO(SocketIOWithTimeout.java:246)
    at org.apache.hadoop.net.SocketOutputStream.waitForWritable(SocketOutputStream.java:172)
    at org.apache.hadoop.net.SocketOutputStream.transferToFully(SocketOutputStream.java:220)
    at org.apache.hadoop.hdfs.server.datanode.BlockSender.sendPacket(BlockSender.java:546)
    at org.apache.hadoop.hdfs.server.datanode.BlockSender.sendBlock(BlockSender.java:710)
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.readBlock(DataXceiver.java:340)
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opReadBlock(Receiver.java:101)
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:65)
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:221)
    at java.lang.Thread.run(Thread.java:744)
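The 480000 millis value in the last trace appears to match the DataNode socket write timeout. As a minimal sketch (assuming a Hadoop 2.x client and the standard hdfs-site.xml property names), these are the settings I plan to inspect:

# Print the effective value of each key (the configured value, or the built-in default if unset).
# 480000 ms is the usual default for the DataNode write timeout.
hdfs getconf -confKey dfs.datanode.socket.write.timeout
hdfs getconf -confKey dfs.client.socket-timeout
hdfs getconf -confKey dfs.datanode.max.transfer.threads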

Thanks for your attention!

0 Answers:

No answers yet