Unable to persist writes to a specific datanode in HDFS

Asked: 2014-03-31 00:15:13

Tags: java sockets hadoop hdfs

I am trying to create an indexing strategy that requires index blocks to reside on the same datanode as the data blocks, in order to reduce latency during data retrieval. I have managed to write the code that reads the data blocks associated with a particular file. For writing, I open a socket connection to a specific datanode, write my data, and then close the socket. Unfortunately, I am not sure 'where' or 'how' the data gets written using this approach, because when I query HDFS with hadoop fs -ls I cannot see my data written anywhere (in some stray file, perhaps?!), yet my program executes without any errors.

Here is my code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;

import java.io.BufferedOutputStream;
import java.io.DataOutputStream;
import java.io.OutputStream;
import java.io.RandomAccessFile;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.nio.channels.FileChannel;
import java.nio.file.OpenOption;
import java.nio.file.StandardOpenOption;
import java.util.Random;
import java.nio.ByteBuffer;

import javax.net.SocketFactory;

public class CopyOfChunkedIndexes {

    public static void main(String[] args) throws Exception {
        if (args.length != 1) {
            System.err.println("Usage: ChunkedIndexes <input path>");
            System.exit(-1);
        }


        Configuration conf = new Configuration();
        conf.set("fs.default.name", "hdfs://localhost:9000");               //for defaulting to HDFS rather than local filesystem
        conf.set("hadoop.security.authentication", "simple");               //disable authentication
        conf.set("hadoop.security.authorization", "false");                 //disable authorization

        Job job = Job.getInstance(conf, "Chunked Indexes");
        job.setJarByClass(CopyOfChunkedIndexes.class);


        Path inputPath = new Path("/user/hadoop-user/sample.txt");
        FileInputFormat.setInputPaths(job, inputPath);

        try{
            FileSystem fs = FileSystem.get(conf);
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            DFSClient dfsclient = dfs.getClient();



            System.out.println("Proceeding for file: " + inputPath.toString());

            FileStatus fileStatus = fs.getFileStatus(inputPath);
            BlockLocation[] bLocations = fs.getFileBlockLocations(inputPath, 0, fileStatus.getLen());


            for(int i = 0; i < bLocations.length; i++)
            {
                System.out.println("Block[" + + i + "]::");
                System.out.println("\nHost(s): ");

                String[] temp = bLocations[i].getHosts();
                for(int j = 0; j < temp.length; j++)
                {
                    System.out.println(temp[j] + "\t");
                }

                System.out.println("\nBlock length: " + bLocations[i].getLength() +
                                    "\n\nDataNode(s) hosting this block: ");

                temp = bLocations[i].getNames();
                for(int j = 0; j < temp.length; j++)
                {
                    System.out.println(temp[j] + "\t");
                }

                System.out.println("\nOffset: " + bLocations[i].getOffset());

                //READING A BLOCK
                FSDataInputStream in = fs.open(inputPath);
                in.seek(bLocations[i].getOffset());

                byte[] buf = new byte[(int)bLocations[i].getLength()];
                in.readFully(buf, 0, buf.length);               //second argument is an offset into buf, not into the file; the seek above already positioned the stream
                in.close();

                System.out.println(new String(buf, "UTF-8"));
                System.out.println("--------------------------------------------------------------------------------------------");
            }


            //WRITE A FILE TO A SPECIFIC DATANODE
            for(int i = 0; i < bLocations.length; i++)
            {
                System.out.println("Block[" + + i + "]::");
                String[] temp;

                System.out.println("\n\nDataNode(s) hosting this block: ");                        //Name(s) = datanode addresses

                temp = bLocations[i].getNames();
                for(int j = 0; j < temp.length; j++)
                {
                    System.out.println(temp[j].split(":")[0] + "\t" + temp[j].split(":")[1]);      //host vs. port
                }

                Socket sock = SocketFactory.getDefault().createSocket();                          //raw TCP connection to the first datanode hosting this block
                InetSocketAddress targetAddr = new InetSocketAddress(temp[0].split(":")[0], Integer.parseInt(temp[0].split(":")[1]));
                NetUtils.connect(sock, targetAddr, 10000);
                sock.setSoTimeout(10000);

                OutputStream baseStream = NetUtils.getOutputStream(sock, 10000);
                DataOutputStream oStream = new DataOutputStream(new BufferedOutputStream(baseStream, 10000));
                oStream.writeBytes("-----------------------------------------Sample text-----------------------------------------------");
                oStream.flush();                                                                  //flush the buffered stream so the bytes actually leave the client

                sock.close();
                System.out.println("Data written, socket closed!");
            }

        }catch(Exception ex){
            ex.printStackTrace();
        }

    }
}

Any help on where I am going wrong would be deeply appreciated! Thanks!

[ PS: I am using Hadoop 2.2.0 on a Linux VM. I have disabled authorization/authentication in the code above because I want to access the datanode directly (without the 'overhead' of authentication), as this is for testing purposes. ]

2 Answers:

Answer 0: (score: 0)

The cluster drops all of your writes because you are not going through the namenode. Everything you push at the datanode directly is treated as file corruption.
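For comparison, a write that does go through the namenode is just the ordinary FileSystem API. The snippet below is a minimal sketch, assuming the same hdfs://localhost:9000 setup as in the question; the class name NamenodeWriteSketch and the output path /user/hadoop-user/index.dat are made up for illustration:

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class NamenodeWriteSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.default.name", "hdfs://localhost:9000");        //same default filesystem as in the question

        FileSystem fs = FileSystem.get(conf);

        //create() asks the namenode to allocate blocks and choose the datanodes;
        //the client then streams the bytes through the regular HDFS write pipeline
        Path indexPath = new Path("/user/hadoop-user/index.dat");    //hypothetical output path
        try (FSDataOutputStream out = fs.create(indexPath, true)) {
            out.write("sample index data".getBytes(StandardCharsets.UTF_8));
        }

        fs.close();
    }
}

A file written this way shows up under hadoop fs -ls, because the namenode has recorded its blocks and their locations.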

Hadoop already does this work for you: when you execute a distributed task on a Hadoop cluster, the closest data is loaded for each task. For example, if you have an Elasticsearch cluster and a Hadoop cluster sharing the same hardware, you only have to create a mapreduce task that uses the local Elasticsearch node, and that's it: no network dance for your data; each task loads its part of the dataset and pushes it to the local Elasticsearch instance.

Enjoy!

Answer 1: (score: 0)

There is a class called BlockPlacementPolicy which can (in theory) be extended to customize how HDFS chooses its datanodes. Although hacky, this approach may work for people who want to do something similar in the future and stumble upon this question.
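As a rough sketch only (in Hadoop 2.x the policy classes live in org.apache.hadoop.hdfs.server.blockmanagement, and the exact method signatures change between releases, so everything below should be checked against the version you run), a custom policy would be a subclass along these lines:

import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;

//Skeleton of a placement policy intended to co-locate index blocks with their data blocks.
//The chooseTarget(...) overloads differ between Hadoop versions, so no override is shown here;
//copy the exact signature from BlockPlacementPolicyDefault in your release and override it,
//returning the datanodes that already hold the corresponding data block and falling back to
//super.chooseTarget(...) otherwise.
public class IndexColocationPolicy extends BlockPlacementPolicyDefault {
}

The class then has to be on the namenode's classpath and selected in hdfs-site.xml, typically via the dfs.block.replicator.classname property (verify the exact key in DFSConfigKeys for your release), followed by a namenode restart.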