我是 Hadoop 的初学者。我想了解 MapReduce 函数的流程。我有点困惑:如何通过代码而不是文件给 map 作业提供输入?我应该如何配置它?请帮忙看看。这是我的代码。
// Mapper for the NCDC weather-data example: extracts one (year, airTemp)
// pair from each fixed-width input record.
public class TemperatureMapper : MapperBase
{
    // Sentinel value used in NCDC records for a missing temperature reading.
    private const int MISSING = 9999;

    // Quality codes 0, 1, 4, 5 and 9 mark readings that are acceptable.
    // Cached as static readonly so the pattern is compiled once, not per record.
    private static readonly Regex GoodQuality = new Regex("[01459]");

    /// <summary>
    /// Parses one fixed-width NCDC record and emits (year, temperature)
    /// when the reading is present and its quality code is acceptable.
    /// </summary>
    /// <param name="line">One raw NCDC record (fixed-width text).</param>
    /// <param name="context">Hadoop context used to emit the key/value pair.</param>
    public override void Map(string line, MapperContext context)
    {
        // Fixed-width layout: year at columns 15-18, signed air temperature
        // ending at column 91, quality code at column 92.
        string year = line.Substring(15, 4);

        // Skip a leading '+' sign so int.Parse accepts the value
        // (it handles a leading '-' natively).
        int startIndex = line[87] == '+' ? 88 : 87;
        int airTemp = int.Parse(line.Substring(startIndex, 92 - startIndex));
        string quality = line.Substring(92, 1);

        // BUG FIX: the original swapped regex pattern and input -- it compiled
        // the quality digit as the pattern and matched it against the literal
        // string "[01459]", which succeeds for ANY digit. The intended check is
        // whether the quality code is one of 0,1,4,5,9.
        if (airTemp != MISSING && GoodQuality.IsMatch(quality))
        {
            context.EmitKeyValue(year, airTemp.ToString());
        }
    }
}
//Reducer
// Reducer: collapses each year's readings to the single maximum temperature.
public class TempReducer : ReducerCombinerBase
{
    /// <summary>
    /// Scans every string-encoded temperature in <paramref name="values"/>
    /// and emits (key, maximum) back to the job context.
    /// </summary>
    public override void Reduce(string key, IEnumerable<string> values,ReducerCombinerContext context)
    {
        // Running maximum; starts at int.MinValue so any reading replaces it.
        int best = int.MinValue;
        foreach (var reading in values)
        {
            int candidate = int.Parse(reading);
            if (candidate > best)
            {
                best = candidate;
            }
        }
        context.EmitKeyValue(key, best.ToString());
    }
}
// Driver: reads sample records from a local file, runs the job in-process
// via StreamingUnit, then attempts the same job against Azure HDInsight.
// NOTE(review): the catch block and the method's closing brace lie outside
// this excerpt.
static void Main(string[] args)
{
try
{
string line;
// Read the sample NCDC records line-by-line from a local text file.
StreamReader file = new StreamReader("temp.txt");
ArrayList al = new ArrayList();
while ((line = file.ReadLine()) != null)
{
al.Add(line);
}
file.Close();
// Materialize the lines as string[] -- the shape StreamingUnit expects.
string[] input = (string[])al.ToArray(typeof(string));
// Local Hadoop/JVM locations required by the streaming emulator.
// NOTE(review): "Java_HOME" is unusual casing -- conventionally JAVA_HOME; verify.
Environment.SetEnvironmentVariable("HADOOP_HOME", @"c:\hadoop");
Environment.SetEnvironmentVariable("Java_HOME", @"c:\hadoop\jvm");
// In-process run: feeds the in-memory string array straight to the
// mapper/reducer. This path executes successfully.
var output = StreamingUnit.Execute<TemperatureMapper, TempReducer>(input);
// Running the job in Azure: connects successfully, but unlike
// StreamingUnit the cluster job reads input from blob storage, not
// from memory -- hence the question of how to supply `input` here.
var hadoop = Hadoop.Connect(); // connected to hadoop successfully
var config = new HadoopJobConfiguration();
hadoop.MapReduceJob.Execute<TemperatureMapper, TempReducer>(config);//how to I provide input here...
Console.ReadLine();
}
我通过 StreamingUnit 获得了正确的结果。现在,我想在 Azure 上执行这项作业。那么如何通过代码而不是文件提供输入?我已通过配置提供输入,即
config.AdditionalStreamingArguments.AddRange(input); //input is array of string
但是当我执行作业时会发生此异常:
The argument must not be empty string.
Parameter name: blobName
答案 0(得分:3):
Hadoop MapReduce默认只对文件进行操作(您可以编写一个非基于文件的存储处理程序,但这并不常见)。
如果您尝试将MapReduce应用于在流上生成的内容,而不是HDFS上的文件中存在的内容,您可能希望在YARN上查看类似Storm的内容。