我们正在使用Azure认知搜索为各种文档建立索引,例如Word或PDF文件,这些文件存储在Azure Blob存储中。我们希望能够翻译提取的非英语文档的内容,并将翻译结果存储到索引中的专用字段中。
当前,内置的文本翻译认知技能最多支持50,000个输入字符,而我们的文档最多可以包含1 MB的文本。根据文档,可以使用内置的“拆分技能”将文本拆分为多个块,但是没有现成的技能说明如何将翻译后的块合并回去。我们的目标是将所有提取的文本翻译后存储在Edm.String类型的单个索引字段中,而不是数组。
除了通过Web API为此目的创建自定义认知技能之外,是否有其他方法可以在索引时翻译大文本块?
答案 0 :(得分:2)
是的,Merge Skill实际上会这样做。如下定义您的技能组中的技能。此技能的“文本”和“偏移”输入是可选的,您可以使用“ itemsToInsert”指定要合并在一起的文本(为翻译输出指定适当的来源)。如果要在每个合并的节之前或之后插入一个空格,请使用insertPreTag和insertPostTag。
{
    "@odata.type": "#Microsoft.Skills.Text.MergeSkill",
    "description": "Merge text back together",
    "context": "/document",
    "insertPreTag": "",
    "insertPostTag": "",
    "inputs": [
        {
            "name": "itemsToInsert",
            "source": "/document/translation_output/*/text"
        }
    ],
    "outputs": [
        {
            "name": "mergedText",
            "targetName": "merged_text_field_in_your_index"
        }
    ]
}
答案 1 :(得分:0)
以下是使用Microsoft.Azure.Search类的C#代码段。遵循詹妮弗在上述回复中的建议。
对技能组定义进行了测试,以正确支持大于50k个字符的文本块的翻译。
/// <summary>
/// Builds the translation portion of the skillset pipeline:
/// pick a split-compatible language code, split the merged content
/// into chunks the translation skill can accept, translate each chunk,
/// then merge the translated chunks back into a single string field.
/// </summary>
/// <returns>The ordered list of skills for the skillset definition.</returns>
private static IList<Skill> GetSkills()
{
    return new List<Skill>
    {
        // ...some skills in the pipeline before translation

        // The split skill supports fewer language codes than detection
        // can produce; fall back to 'en' when the detected code is not
        // in the supported set (expression built by SplitLanguageExpression).
        new ConditionalSkill(
            name: "05-1-set-language-code-for-split",
            description: "Set compatible language code for split skill (e.g. 'ru' is not supported)",
            context: "/document",
            inputs: new List<InputFieldMappingEntry>
            {
                new InputFieldMappingEntry(name: "condition", source: SplitLanguageExpression),
                new InputFieldMappingEntry(name: "whenTrue", source: "/document/language_code"),
                new InputFieldMappingEntry(name: "whenFalse", source: "= 'en'")
            },
            outputs: new List<OutputFieldMappingEntry>
            {
                new OutputFieldMappingEntry(name: "output", targetName: "language_code_split")
            }),

        // Chunk the original content into pages of at most 50,000 characters,
        // the translation skill's per-call input limit.
        new SplitSkill(
            name: "05-2-split-original-content",
            description: "Split original merged content into chunks for translation",
            defaultLanguageCode: SplitSkillLanguage.En,
            textSplitMode: TextSplitMode.Pages,
            maximumPageLength: 50000,
            context: "/document/merged_content_original",
            inputs: new List<InputFieldMappingEntry>
            {
                new InputFieldMappingEntry(name: "text", source: "/document/merged_content_original"),
                new InputFieldMappingEntry(name: "languageCode", source: "/document/language_code_split")
            },
            outputs: new List<OutputFieldMappingEntry>
            {
                new OutputFieldMappingEntry(name: "textItems", targetName: "pages")
            }),

        // Translate every page independently; context '/pages/*' runs the
        // skill once per chunk produced by the split skill above.
        new TextTranslationSkill(
            name: "05-3-translate-original-content-pages",
            description: "Translate original merged content chunks",
            defaultToLanguageCode: TextTranslationSkillLanguage.En,
            context: "/document/merged_content_original/pages/*",
            inputs: new List<InputFieldMappingEntry>
            {
                new InputFieldMappingEntry(name: "text", source: "/document/merged_content_original/pages/*"),
                new InputFieldMappingEntry(name: "fromLanguageCode", source: "/document/language_code")
            },
            outputs: new List<OutputFieldMappingEntry>
            {
                new OutputFieldMappingEntry(name: "translatedText", targetName: "translated_text")
            }),

        // Recombine the translated pages into one string, separated by
        // single spaces, ready to map into an Edm.String index field.
        new MergeSkill(
            name: "05-4-merge-translated-content-pages",
            description: "Merge translated content into one text string",
            context: "/document",
            insertPreTag: " ",
            insertPostTag: " ",
            inputs: new List<InputFieldMappingEntry>
            {
                new InputFieldMappingEntry(name: "itemsToInsert", source: "/document/merged_content_original/pages/*/translated_text")
            },
            outputs: new List<OutputFieldMappingEntry>
            {
                new OutputFieldMappingEntry(name: "mergedText", targetName: "merged_content_translated")
            }),

        // ... some skills in the pipeline after translation
    };
}
/// <summary>
/// Builds the conditional-skill expression that evaluates to true when the
/// detected language code is one the split skill supports, e.g.
/// "= ($(/document/language_code) == 'da') || ($(/document/language_code) == 'de') || ...".
/// </summary>
private static string SplitLanguageExpression
{
    get
    {
        // Enum member names correspond to the language codes; lower-case them
        // with the invariant culture so the generated codes never vary with the
        // current thread culture (ToLower() under tr-TR would turn 'I' into a
        // dotless 'ı', e.g. SplitSkillLanguage.It -> "ıt" instead of "it").
        var parts = Enum.GetValues(typeof(SplitSkillLanguage))
            .Cast<SplitSkillLanguage>()
            .Select(v => $"($(/document/language_code) == '{v.ToString().ToLowerInvariant()}')");

        return "= " + string.Join(" || ", parts);
    }
}