I'm using Amazon EMR with Hive 0.11. I'm trying to create a Hive UDF that returns multiple columns from a single UDF call.
For example, I'd like to call a UDF like the one below and get back several (named) columns:
SELECT get_data(columnname) FROM table;
I can't find any documentation on this, but I've heard it is possible with a Generic UDF. Does anyone know what needs to be returned from the evaluate() method for this to work?
Answer 0 (score: 3)
I just use GenericUDTF for this. After writing a UDTF that extends GenericUDTF, it needs to implement two important methods: initialize and process (plus close).
Here is a simple example:
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.StringObjectInspector;

public class UDFExtractDomainMethod extends GenericUDTF {

    // number of output columns
    private static final Integer OUT_COLS = 2;
    // reused buffer holding one output row
    private transient Object[] forwardColObj = new Object[OUT_COLS];
    private transient ObjectInspector[] inputOIs;

    /**
     * @param argOIs check that the argument is valid.
     * @return the output column structure (field names and types).
     * @throws UDFArgumentException
     */
    @Override
    public StructObjectInspector initialize(ObjectInspector[] argOIs) throws UDFArgumentException {
        if (argOIs.length != 1
                || argOIs[0].getCategory() != ObjectInspector.Category.PRIMITIVE
                || !argOIs[0].getTypeName().equals(serdeConstants.STRING_TYPE_NAME)) {
            throw new UDFArgumentException("split_url only takes one argument of type string");
        }
        inputOIs = argOIs;
        List<String> outFieldNames = new ArrayList<String>();
        List<ObjectInspector> outFieldOIs = new ArrayList<ObjectInspector>();
        outFieldNames.add("host");
        outFieldNames.add("method");
        outFieldOIs.add(PrimitiveObjectInspectorFactory.javaStringObjectInspector);
        // writableStringObjectInspector would correspond to hadoop.io.Text instead
        outFieldOIs.add(PrimitiveObjectInspectorFactory.javaStringObjectInspector);
        return ObjectInspectorFactory.getStandardStructObjectInspector(outFieldNames, outFieldOIs);
    }

    @Override
    public void process(Object[] objects) throws HiveException {
        try {
            // use the input ObjectInspector to convert the raw value to a Java String
            String inUrl = ((StringObjectInspector) inputOIs[0]).getPrimitiveJavaObject(objects[0]);
            URI uri = new URI(inUrl);
            forwardColObj[0] = uri.getHost();
            forwardColObj[1] = uri.getRawPath();
            // emit one row with two columns
            forward(forwardColObj);
        } catch (URISyntaxException e) {
            e.printStackTrace();
        }
    }

    @Override
    public void close() throws HiveException {
    }
}
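
Once the class is packaged into a jar, the UDTF can be registered and called from Hive. A minimal sketch of the HiveQL, assuming a placeholder jar path, table name (logs), and input column (url); the function name split_url matches the error message above, and the class name is unqualified because the example declares no package:

ADD JAR /path/to/udf-extract-domain.jar;    -- placeholder jar path
CREATE TEMPORARY FUNCTION split_url AS 'UDFExtractDomainMethod';

-- Used on its own, the UDTF produces both named columns:
SELECT split_url(url) AS (host, method) FROM logs;

-- Or combine the generated columns with the source rows via LATERAL VIEW:
SELECT t.host, t.method
FROM logs
LATERAL VIEW split_url(url) t AS host, method;

Note that a UDTF used directly in the SELECT clause cannot be mixed with other expressions there; LATERAL VIEW is the usual way to join its output back to the other columns of the table.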