在Python中使用Stanford CoreNLP进行情感分析

时间:2019-01-15 20:54:23

标签: python stanford-nlp

我正在学习NLP,并且刚刚安装了Stanford CoreNLP。我使用Windows10,并且已将Python3与Anaconda3一起安装。我还安装了pycorenlp-0.3。

我在下载和提取文件的目录中使用以下命令运行CoreNLP。

java -mx4g -cp "*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port 9000 -timeout 15000

enter image description here

在Jupyter笔记本中,我运行以下在网络上找到的代码:

import json, requests

class StanfordCoreNLP:

        """
        Modified from https://github.com/smilli/py-corenlp (https://github.com/smilli/py-corenlp)
        """
        def __init__(self, server_url):
            # TODO: Error handling? More checking on the url?
            if server_url[-1] == '/':
                server_url = server_url[:-1]
            self.server_url = server_url

        def annotate(self, text, properties=None):
            assert isinstance(text, str)

            if properties is None:
                properties = {}
            else:
                assert isinstance(properties, dict)

            # Checks that the Stanford CoreNLP server is started.
            try:
                requests.get(self.server_url)
            except requests.exceptions.ConnectionError:
                raise Exception('Check whether you have started the CoreNLP server e.g.\n'
                                    '$ cd <path_to_core_nlp_folder>/stanford-corenlp-full-2016-10-31/ \n'
                    '$ java -mx4g -cp "*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port <port>' )

            data = text.encode()
            r = requests.post(
                self.server_url, params={
                        'properties': str(properties)
                }, data=data, headers={'Connection': 'close'})

            output = r.text

            if ('outputFormat' in properties
                and properties['outputFormat'] == 'json'):
                try:
                    output = json.loads(output, encoding='utf-8', strict=True)
                except:
                    pass
            return output

def sentiment_analysis_on_sentence(sentence):
            # The StanfordCoreNLP server is running on http://127.0.0.1:9000 (http://127.0.0.1:9000)
            nlp = StanfordCoreNLP('http://127.0.0.1:9000 (http://127.0.0.1:9000)')
                    # Json response of all the annotations
            output = nlp.annotate(sentence, properties={
                    "annotators": "tokenize,ssplit,parse,sentiment",
                    "outputFormat": "json",
                    # Only split the sentence at End Of Line. We assume that this method only takes in one single sentence.
                    "ssplit.eolonly": "true",
                    # Setting enforceRequirements to skip some annotators and make the process faster
                    "enforceRequirements": "false"
                    })
            # Only care about the result of the first sentence because we assume we only annotate a single sentence 

            return int(output['sentences'][0]['sentimentValue'])

但是,当我在Jupyter Notebook中运行时:

sentiment_analysis_on_sentence('I like the service.')

我得到一个例外:

---------------------------------------------------------------------------
LocationParseError                        Traceback (most recent call last)
C:\ProgramData\Anaconda3\lib\site-packages\requests\models.py in prepare_url(self, url, params)
    370         try:
--> 371             scheme, auth, host, port, path, query, fragment = parse_url(url)
    372         except LocationParseError as e:

C:\ProgramData\Anaconda3\lib\site-packages\urllib3\util\url.py in parse_url(url)
    198             if not port.isdigit():
--> 199                 raise LocationParseError(url)
    200             try:

LocationParseError: Failed to parse: 127.0.0.1:9000 (http:

During handling of the above exception, another exception occurred:

InvalidURL                                Traceback (most recent call last)
<ipython-input-142-e4763a0324a6> in <module>()
----> 1 sentiment_analysis_on_sentence('I like the service.')

<ipython-input-141-9cf27500efe3> in sentiment_analysis_on_sentence(sentence)
     54                     "ssplit.eolonly": "true",
     55                     # Setting enforceRequirements to skip some annotators and make the process faster
---> 56                     "enforceRequirements": "false"
     57                     })
     58             # Only care about the result of the first sentence because we assume we only annotate a single sentence

<ipython-input-141-9cf27500efe3> in annotate(self, text, properties)
     22             # Checks that the Stanford CoreNLP server is started.
     23             try:
---> 24                 requests.get(self.server_url)
     25             except requests.exceptions.ConnectionError:
     26                 raise Exception('Check whether you have started the CoreNLP server e.g.\n'

C:\ProgramData\Anaconda3\lib\site-packages\requests\api.py in get(url, params, **kwargs)
     70 
     71     kwargs.setdefault('allow_redirects', True)
---> 72     return request('get', url, params=params, **kwargs)
     73 
     74 

C:\ProgramData\Anaconda3\lib\site-packages\requests\api.py in request(method, url, **kwargs)
     56     # cases, and look like a memory leak in others.
     57     with sessions.Session() as session:
---> 58         return session.request(method=method, url=url, **kwargs)
     59 
     60 

C:\ProgramData\Anaconda3\lib\site-packages\requests\sessions.py in request(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)
    492             hooks=hooks,
    493         )
--> 494         prep = self.prepare_request(req)
    495 
    496         proxies = proxies or {}

C:\ProgramData\Anaconda3\lib\site-packages\requests\sessions.py in prepare_request(self, request)
    435             auth=merge_setting(auth, self.auth),
    436             cookies=merged_cookies,
--> 437             hooks=merge_hooks(request.hooks, self.hooks),
    438         )
    439         return p

C:\ProgramData\Anaconda3\lib\site-packages\requests\models.py in prepare(self, method, url, headers, files, data, params, auth, cookies, hooks, json)
    303 
    304         self.prepare_method(method)
--> 305         self.prepare_url(url, params)
    306         self.prepare_headers(headers)
    307         self.prepare_cookies(cookies)

C:\ProgramData\Anaconda3\lib\site-packages\requests\models.py in prepare_url(self, url, params)
    371             scheme, auth, host, port, path, query, fragment = parse_url(url)
    372         except LocationParseError as e:
--> 373             raise InvalidURL(*e.args)
    374 
    375         if not scheme:

InvalidURL: Failed to parse: 127.0.0.1:9000 (http:

我该如何解决?

1 个答案:

答案 0 :(得分:0)

第48行

nlp = StanfordCoreNLP('http://127.0.0.1:9000 (http://127.0.0.1:9000)')

您应该在第一个带有端口号的URL之后删除此(http://127.0.0.1:9000)

其他步骤:

正如您在命令行日志中看到的那样,Stanford NLP使用lexparser而不是ShiftReduce Parser。 这很好,因为情感分析和Shiftreduce解析器当前具有探针一起使用。 为确保两者均正常工作,您应该添加

    "parse.model": "edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz"

调用属性(请参阅第50 f。行)。

对于德国用户: 如果使用错误的语言环境启动了Stanford CoreNLP Server,但最终收到的错误消息仍然是错误的。 然后,应将以下paramters添加到服务器启动位置:

"-Duser.language=en -Duser.country=US Default"