How to use the Elasticsearch Ingest Attachment Processor Plugin with the elasticsearch-dsl Python package

Date: 2017-10-28 09:41:13

Tags: python python-3.x elasticsearch elasticsearch-plugin elasticsearch-dsl

I'm having trouble using the Ingest Attachment Processor Plugin with Elasticsearch (5.5 on AWS, 5.6 locally). I'm developing in Python (3.6) and using the elasticsearch-dsl library.

I'm using the Persistence flow and have my class set up as follows:

import base64
from elasticsearch.exceptions import NotFoundError
from elasticsearch_dsl.field import Attachment, Text
from elasticsearch_dsl import DocType, Index, analyzer

lower_keyword = analyzer('keyword', tokenizer="keyword", filter=["lowercase"])

class ExampleIndex(DocType):
    class Meta:
        index = 'example'
        doc_type = 'Example'

    id = Text()
    name = Text(analyzer=lower_keyword)
    my_file = Attachment()

Then I have a function like this that I call to create the index and save the document:

def index_doc(a_file):
    # Ensure that the Index is created before any documents are saved
    try:
        i = Index('example')
        i.doc_type(ExampleIndex)
        i.create()

        # todo - Pipeline creation needs to go here - But how do you do it?

    except Exception:
        pass

    # Check for existing index
    indices = ExampleIndex()
    try:
        s = indices.search()
        r = s.query('match', name=a_file.name).execute()
        if r.success():
            for h in r:
                indices = ExampleIndex.get(id=h.meta.id)
                break
    except NotFoundError:
        pass
    except Exception:
        logger.exception("Something went wrong")
        raise

    # Populate the document   
    indices.name = a_file.name
    with open(a_file.path_to_file, 'rb') as f:
        contents = f.read()
    indices.my_file = base64.b64encode(contents).decode("ascii")

    indices.save(pipeline="attachment") if indices.my_file else indices.save()

I have a text file whose contents are "This is a test document". When those contents are base64 encoded they become VGhpcyBpcyBhIHRlc3QgZG9jdW1lbnQK.
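For reference, a minimal sketch of how that value is produced (assuming the file is saved as test.txt):

import base64

# Read the raw bytes and base64 encode them, as done in index_doc() above
with open('test.txt', 'rb') as f:
    encoded = base64.b64encode(f.read()).decode("ascii")

print(encoded)  # VGhpcyBpcyBhIHRlc3QgZG9jdW1lbnQK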

If I use curl directly, it works:

Create the pipeline:

curl -XPUT 'localhost:9200/_ingest/pipeline/attachment?pretty' -H 'Content-Type: application/json' -d'
{
  "description" : "Extract attachment information",
  "processors" : [
    {
      "attachment" : {
        "field" : "my_file"
      }
    }
  ]
}'

Put the data:

curl -XPUT 'localhost:9200/example/Example/AV9nkyJMZAQ2lQ3CtsLb?pipeline=attachment&pretty'\
-H 'Content-Type: application/json' \
-d '{"my_file": "VGhpcyBpcyBhIHRlc3QgZG9jdW1lbnQK"}'

Get the data: http://localhost:9200/example/Example/AV9nkyJMZAQ2lQ3CtsLb?pretty

{
    "_index" : "example",
    "_type" : "Example",
    "_id" : "AV9nkyJMZAQ2lQ3CtsLb",
    "_version" : 4,
    "found" : true,
    "_source" : {
        "my_file" : "VGhpcyBpcyBhIHRlc3QgZG9jdW1lbnQK",
        "attachment" : {
            "content_type" : "text/plain; charset=ISO-8859-1",
            "language" : "en",
            "content" : "This is a test document",
            "content_length" : 25
        }
    }
}
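The equivalent fetch with the low-level client (reusing the hypothetical es client from the sketch above):

doc = es.get(index='example', doc_type='Example', id='AV9nkyJMZAQ2lQ3CtsLb')
print(doc['_source']['attachment']['content'])  # -> This is a test document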

The trouble is that I can't see how to recreate this using the elasticsearch-dsl Python library.

UPDATE: I can now get everything working apart from the initial creation of the pipeline. If I create the pipeline with curl, I can then use it simply by changing the .save() call to .save(pipeline="attachment"). I've updated my function above to show this, along with a comment marking where the pipeline creation needs to go.

Here is the curl command that creates the pipeline:

curl -XPUT 'localhost:9200/_ingest/pipeline/attachment?pretty' \
     -H 'Content-Type: application/json' \
     -d '{"description": "Extract attachment information", "processors": [{"attachment": {"field": "my_file"}}]}'

1 Answer:

Answer 0 (score: 2):

The answer to the question is to use the IngestClient from the lower-level elasticsearch-py library to create the pipeline before using it:

from elasticsearch.client.ingest import IngestClient
p = IngestClient(es_connection)
p.put_pipeline(id='attachment', body={
    'description': "Extract attachment information",
    'processors': [
        {"attachment": {"field": "cv"}}
    ]
})
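Once registered, the pipeline can be sanity-checked from the same client (assuming the es_connection above):

# Returns the stored pipeline definition, or raises NotFoundError if it is missing
print(p.get_pipeline(id='attachment'))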

A complete working example that creates the pipeline, index and document in Elasticsearch using the elasticsearch-dsl persistence flow (DocType) is:

import base64
from uuid import uuid4
from elasticsearch.client.ingest import IngestClient
from elasticsearch.exceptions import NotFoundError
from elasticsearch_dsl import analyzer, DocType, Index
from elasticsearch_dsl.connections import connections
from elasticsearch_dsl.field import Attachment, Text


# Establish a connection
host = '127.0.0.1'
port = 9200
es = connections.create_connection(host=host, port=port)

# Some custom analyzers
html_strip = analyzer('html_strip', tokenizer="standard", filter=["standard", "lowercase", "stop", "snowball"],
                      char_filter=["html_strip"])
lower_keyword = analyzer('keyword', tokenizer="keyword", filter=["lowercase"])


class ExampleIndex(DocType):
    class Meta:
        index = 'example'
        doc_type = 'Example'

    id = Text()
    uuid = Text()
    name = Text()
    town = Text(analyzer=lower_keyword)
    my_file = Attachment(analyzer=html_strip)


def save_document(doc):
    """

    :param obj doc: Example object containing values to save
    :return:
    """
    try:
        # Create the Pipeline BEFORE creating the index
        p = IngestClient(es)
        p.put_pipeline(id='myattachment', body={
            'description': "Extract attachment information",
            'processors': [
                {
                    "attachment": {
                        "field": "my_file"
                    }
                }
            ]
        })

        # Create the index. An exception will be raised if it already exists
        i = Index('example')
        i.doc_type(ExampleIndex)
        i.create()
    except Exception:
        # todo - should be restricted to the expected Exception subclasses
        pass

    indices = ExampleIndex()
    try:
        s = indices.search()
        r = s.query('match', uuid=doc.uuid).execute()
        if r.success():
            for h in r:
                indices = ExampleIndex.get(id=h.meta.id)
                break
    except NotFoundError:
        # New record
        pass
    except Exception:
        print("Unexpected error")
        raise

    # Now set the doc properties
    indices.uuid = doc.uuid
    indices.name = doc.name
    indices.town = doc.town
    if doc.my_file:
        with open(doc.my_file, 'rb') as f:
            contents = f.read()
        indices.my_file = base64.b64encode(contents).decode("ascii")

    # Save the index, using the Attachment pipeline if a file was attached
    return indices.save(pipeline="myattachment") if indices.my_file else indices.save()


class MyObj(object):
    uuid = uuid4()
    name = ''
    town = ''
    my_file = ''

    def __init__(self, name, town, file):
        self.name = name
        self.town = town
        self.my_file = file


me = MyObj("Steve", "London", '/home/steve/Documents/test.txt')

res = save_document(me)
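To confirm the extraction worked end to end, one might then search the field the attachment processor writes to (a sketch; assumes the processor's default target field, attachment, and that the index has been refreshed):

# Double underscores map to dots in elasticsearch-dsl field names
s = ExampleIndex.search().query('match', attachment__content='test document')
for hit in s.execute():
    print(hit.name, hit.attachment.content)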