How do wrapper types work in Hadoop?

Date: 2017-10-20 19:12:56

Tags: java hadoop serialization mapreduce deserialization

I am not a Java expert, but I know the basics of Java and I always try to understand Java code in depth whenever I come across it. This may be a very silly doubt, but I want to get it clear in my mind. I am posting in the Java community because my doubt is limited to Java.

For the last few months I have been working with Hadoop, and I came across the fact that Hadoop uses its own types, which wrap Java's primitive types, in order to increase the efficiency of sending data across the network on the basis of serialization and deserialization.
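For example, this is my own minimal sketch of what I mean by a wrapper type (an IntWritable used on its own, outside of any job):

import org.apache.hadoop.io.IntWritable;

IntWritable count = new IntWritable(42);  // wraps a plain Java int
int plain = count.get();                  // unwrap back to the primitive
count.set(plain + 1);                     // Writables are mutable, so the same object can be reused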

My confusion starts here. Let's say we have some data in HDFS that gets processed by the following Java code running as a Hadoop job:

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class WordCountMapper
        extends Mapper<LongWritable, Text, Text, IntWritable> {

    @Override
    public void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        String line = value.toString();
        for (String word : line.split(" ")) {
            if (word.length() > 0) {
                context.write(new Text(word), new IntWritable(1));
            }
        }
    }
}

In this code, Hadoop's own types such as LongWritable, Text and IntWritable are used.
Let's pick the Text type, which wraps Java's String type (please correct me if I am wrong). My doubt is: when we pass these parameters to the map method in the code above, how do those parameters interact with the code in the imported package, i.e. org.apache.hadoop.io.Text?

Below is the code of the Text class:

package org.apache.hadoop.io;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.CharacterCodingException;
import java.nio.charset.Charset;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.CharsetEncoder;
import java.nio.charset.CodingErrorAction;
import java.nio.charset.MalformedInputException;
import java.text.CharacterIterator;
import java.text.StringCharacterIterator;
import java.util.Arrays;
import org.apache.avro.reflect.Stringable;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;



@Stringable
@InterfaceAudience.Public
@InterfaceStability.Stable
public class Text
  extends BinaryComparable
  implements WritableComparable<BinaryComparable>
{
  private static final Log LOG = LogFactory.getLog(Text.class);

  private static ThreadLocal<CharsetEncoder> ENCODER_FACTORY = new ThreadLocal<CharsetEncoder>()
  {
    protected CharsetEncoder initialValue() {
      return Charset.forName("UTF-8").newEncoder().onMalformedInput(CodingErrorAction.REPORT).onUnmappableCharacter(CodingErrorAction.REPORT);
    }
  };

  private static ThreadLocal<CharsetDecoder> DECODER_FACTORY = new ThreadLocal<CharsetDecoder>()
  {
    protected CharsetDecoder initialValue() {
      return Charset.forName("UTF-8").newDecoder().onMalformedInput(CodingErrorAction.REPORT).onUnmappableCharacter(CodingErrorAction.REPORT);
    }
  };



  private static final byte[] EMPTY_BYTES = new byte[0];
  private byte[] bytes;
  private int length;

  public Text()
  {
    bytes = EMPTY_BYTES;
  }


  public Text(String string)
  {
    set(string);
  }

  public Text(Text utf8)
  {
    set(utf8);
  }


  public Text(byte[] utf8)
  {
    set(utf8);
  }




  public byte[] getBytes()
  {
    return bytes;
  }

  public int getLength()
  {
    return length;
  }








  public int charAt(int position)
  {
    if (position > length) return -1;
    if (position < 0) return -1;
    ByteBuffer bb = (ByteBuffer)ByteBuffer.wrap(bytes).position(position);
    return bytesToCodePoint(bb.slice());
  }

  public int find(String what) {
    return find(what, 0);
  }


  public int find(String what, int start)
  {
    try
    {
      ByteBuffer src = ByteBuffer.wrap(bytes, 0, length);
      ByteBuffer tgt = encode(what);
      byte b = tgt.get();
      src.position(start);

      while (src.hasRemaining()) {
        if (b == src.get()) {             // matching first byte
          src.mark();                     // save position in src
          tgt.mark();                     // save position in tgt
          boolean found = true;
          int pos = src.position() - 1;
          while (tgt.hasRemaining()) {
            if (!src.hasRemaining()) {    // src expired first
              tgt.reset();
              src.reset();
              found = false;
              break;
            }
            if (tgt.get() != src.get()) { // premature mismatch
              tgt.reset();
              src.reset();
              found = false;
              break;
            }
          }
          if (found) return pos;
        }
      }
      return -1;                          // not found
    }
    catch (CharacterCodingException e) {
      e.printStackTrace();
      return -1;
    }
  }

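  // Sets this Text to hold the contents of a Java String, encoded as UTF-8 bytes.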
  public void set(String string)
  {
    try
    {
      ByteBuffer bb = encode(string, true);
      bytes = bb.array();
      length = bb.limit();
    } catch (CharacterCodingException e) {
      throw new RuntimeException("Should not have happened " + e.toString());
    }
  }


  public void set(byte[] utf8)
  {
    set(utf8, 0, utf8.length);
  }

  public void set(Text other)
  {
    set(other.getBytes(), 0, other.getLength());
  }






  public void set(byte[] utf8, int start, int len)
  {
    setCapacity(len, false);
    System.arraycopy(utf8, start, bytes, 0, len);
    length = len;
  }






  public void append(byte[] utf8, int start, int len)
  {
    setCapacity(length + len, true);
    System.arraycopy(utf8, start, bytes, length, len);
    length += len;
  }



  public void clear()
  {
    length = 0;
  }










  private void setCapacity(int len, boolean keepData)
  {
    if ((bytes == null) || (bytes.length < len)) {
      if ((bytes != null) && (keepData)) {
        bytes = Arrays.copyOf(bytes, Math.max(len, length << 1));
      } else {
        bytes = new byte[len];
      }
    }
  }



  public String toString()
  {
    try
    {
      return decode(bytes, 0, length);
    } catch (CharacterCodingException e) {
      throw new RuntimeException("Should not have happened " + e.toString());
    }
  }

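  // Deserialization entry point: refills this object from a binary stream
  // (a vint length followed by that many UTF-8 bytes).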
  public void readFields(DataInput in)
    throws IOException
  {
    int newLength = WritableUtils.readVInt(in);
    setCapacity(newLength, false);
    in.readFully(bytes, 0, newLength);
    length = newLength;
  }

  public static void skip(DataInput in) throws IOException
  {
    int length = WritableUtils.readVInt(in);
    WritableUtils.skipFully(in, length);
  }




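  // Serialization entry point: writes the length as a vint followed by the raw UTF-8 bytes.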
  public void write(DataOutput out)
    throws IOException
  {
    WritableUtils.writeVInt(out, length);
    out.write(bytes, 0, length);
  }

  public boolean equals(Object o)
  {
    if ((o instanceof Text))
      return super.equals(o);
    return false;
  }

What I would like to know is: when we run the above Hadoop code, how does the data in HDFS flow through the parameters we declared in the map method?
Once the first record from HDFS hits the Text parameter, how does it flow inside the org.apache.hadoop.io.Text class?
I mean, where does it start (I assume it starts from the set method in the class, because it takes the same kind of parameter as the map method mentioned above, am I correct?)?
Where in the code does it change from a plain String type into the Text type?
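For example, from reading the class above, this is my rough guess at the path a single word takes inside the mapper (a minimal sketch of my assumption, not something I have verified against the framework):

// context.write(new Text(word), new IntWritable(1)) in the mapper:
Text t = new Text(word);   // the Text(String) constructor calls set(String)
                           // set(String) encodes the String as UTF-8 into the private byte[] bytes field
byte[] raw = t.getBytes(); // from here on, only bytes and length matter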

My second doubt is: once the data is stored in the Text type, who kicks off its serialization? I mean, who calls write(DataOutput out), and once the data reaches its destination over the network, who calls readFields(DataInput in)? How does this work, and where do I need to look?

I hope what I am asking is clear.

1 Answer:

Answer 0: (score: 0)

As with all network or disk operations, everything is transferred as raw bytes. The Text class deserializes those bytes as UTF-8. The Writables determine how the data is represented, and the Comparables determine how the data is ordered.
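As a minimal, self-contained sketch of that contract (done by hand here; in a real job the framework makes these write/readFields calls for you, for instance when writing map output or reading it back during the shuffle):

import org.apache.hadoop.io.Text;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class WritableRoundTrip {
    public static void main(String[] args) throws IOException {
        // Serialization: write(DataOutput) turns the object into bytes
        Text out = new Text("hello hadoop");
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        out.write(new DataOutputStream(buffer));

        // Deserialization: readFields(DataInput) refills a reusable object from those bytes
        Text in = new Text();
        in.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));

        System.out.println(in.toString());   // prints "hello hadoop"
    }
}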

The InputFormat configured on the job determines which Writables are handed to the map or reduce task.
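For example, a minimal sketch of a driver fragment, assuming the default TextInputFormat, which produces a LongWritable key (the byte offset of the line) and a Text value (the line itself):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

Job job = Job.getInstance(new Configuration(), "word count");
job.setJarByClass(WordCountMapper.class);
job.setMapperClass(WordCountMapper.class);
job.setInputFormatClass(TextInputFormat.class);   // LongWritable key, Text value for the mapper
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(IntWritable.class);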

An InputSplit determines how the raw byte stream is split up and read into Writables, and each InputSplit starts one map task.
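Roughly speaking, the framework's record-reading loop is what keeps feeding deserialized Writables into your map method; a simplified sketch of what Mapper.run does (not the exact Hadoop source) looks like this:

// inside a map task, run once per InputSplit (simplified)
public void run(Context context) throws IOException, InterruptedException {
    setup(context);
    while (context.nextKeyValue()) {                 // the RecordReader pulls bytes from the split
        map(context.getCurrentKey(),                 // e.g. LongWritable byte offset
            context.getCurrentValue(),               // e.g. Text line, filled from raw bytes
            context);
    }
    cleanup(context);
}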

Reference: https://hadoop.apache.org/docs/stable/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html