I have the following code, which is supposed to plot a word cloud of a given text in matplotlib and then convert it to plotly:
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import plotly.tools as tls
# Thanks : https://www.kaggle.com/aashita/word-clouds-of-various-shapes ##
def plot_wordcloud(text, mask=None, max_words=200, max_font_size=100, figure_size=(24.0, 16.0),
                   title=None, title_size=40, image_color=False):
    stopwords = set(STOPWORDS)
    wordcloud = WordCloud(background_color='black',
                          stopwords=stopwords,
                          max_words=max_words,
                          max_font_size=max_font_size,
                          random_state=42,
                          width=800,
                          height=400,
                          mask=mask)
    wordcloud.generate(str(text))
    fig = plt.figure()
    plt.imshow(wordcloud)
    return tls.mpl_to_plotly(fig)
word_list = "Wikipedia was launched on January 15, 2001, by Jimmy Wales and Larry Sanger.[10] Sanger coined its name,[11][12] as a portmanteau of wiki[notes 3] and 'encyclopedia'. Initially an English-language encyclopedia, versions in other languages were quickly developed. With 5,748,461 articles,[notes 4] the English Wikipedia is the largest of the more than 290 Wikipedia encyclopedias. Overall, Wikipedia comprises more than 40 million articles in 301 different languages[14] and by February 2014 it had reached 18 billion page views and nearly 500 million unique visitors per month.[15] In 2005, Nature published a peer review comparing 42 science articles from Encyclopædia Britannica and Wikipedia and found that Wikipedia's level of accuracy approached that of Britannica.[16] Time magazine stated that the open-door policy of allowing anyone to edit had made Wikipedia the biggest and possibly the best encyclopedia in the world and it was testament to the vision of Jimmy Wales.[17] Wikipedia has been criticized for exhibiting systemic bias, for presenting a mixture of 'truths, half truths, and some falsehoods',[18] and for being subject to manipulation and spin in controversial topics.[19] In 2017, Facebook announced that it would help readers detect fake news by suitable links to Wikipedia articles. YouTube announced a similar plan in 2018."
plot_wordcloud(word_list, title="Word Cloud")
This only returns a blank figure, with nothing in the data section:
Figure({
'data': [],
'layout': {'autosize': False,
'height': 288,
'hovermode': 'closest',
'margin': {'b': 61, 'l': 54, 'pad': 0, 'r': 43, 't': 59},
'showlegend': False,
'width': 432,
'xaxis': {'anchor': 'y',
'domain': [0.0, 1.0],
'mirror': 'ticks',
'nticks': 10,
'range': [-0.5, 799.5],
'showgrid': False,
'showline': True,
'side': 'bottom',
'tickfont': {'size': 10.0},
'ticks': 'inside',
'type': 'linear',
'zeroline': False},
'yaxis': {'anchor': 'x',
'domain': [0.0, 1.0],
'mirror': 'ticks',
'nticks': 10,
'range': [399.5, -0.5],
'showgrid': False,
'showline': True,
'side': 'left',
'tickfont': {'size': 10.0},
'ticks': 'inside',
'type': 'linear',
'zeroline': False}}
})
Why? And how do I fix it?
If I just want the matplotlib plot it works fine - return fig gives back the static wordcloud figure.
I also tried to plot the word cloud directly in plotly, but go.Scatter needs x and y values supplied explicitly - it cannot pick them up implicitly from the wordcloud object the way plt.imshow does. Hence the 'object is not iterable' error:
def plot_wordcloud(text, mask=None, max_words=200, max_font_size=100, figure_size=(24.0, 16.0),
                   title=None, title_size=40, image_color=False):
    stopwords = set(STOPWORDS)
    wordcloud = WordCloud(background_color='black',
                          stopwords=stopwords,
                          max_words=max_words,
                          max_font_size=max_font_size,
                          random_state=42,
                          width=800,
                          height=400,
                          mask=mask)
    wordcloud.generate(str(text))
    data = go.Scatter(dict(wordcloud.generate(str(text))),
                      mode='text',
                      text=words,
                      marker={'opacity': 0.3},
                      textfont={'size': weights,
                                'color': colors})
    layout = go.Layout({'xaxis': {'showgrid': False, 'showticklabels': False, 'zeroline': False},
                        'yaxis': {'showgrid': False, 'showticklabels': False, 'zeroline': False}})
    fig = go.Figure(data=[data], layout=layout)
    return fig
word_list = "Wikipedia was launched on January 15, 2001, by Jimmy Wales and Larry Sanger.[10] Sanger coined its name,[11][12] as a portmanteau of wiki[notes 3] and 'encyclopedia'. Initially an English-language encyclopedia, versions in other languages were quickly developed. With 5,748,461 articles,[notes 4] the English Wikipedia is the largest of the more than 290 Wikipedia encyclopedias. Overall, Wikipedia comprises more than 40 million articles in 301 different languages[14] and by February 2014 it had reached 18 billion page views and nearly 500 million unique visitors per month.[15] In 2005, Nature published a peer review comparing 42 science articles from Encyclopædia Britannica and Wikipedia and found that Wikipedia's level of accuracy approached that of Britannica.[16] Time magazine stated that the open-door policy of allowing anyone to edit had made Wikipedia the biggest and possibly the best encyclopedia in the world and it was testament to the vision of Jimmy Wales.[17] Wikipedia has been criticized for exhibiting systemic bias, for presenting a mixture of 'truths, half truths, and some falsehoods',[18] and for being subject to manipulation and spin in controversial topics.[19] In 2017, Facebook announced that it would help readers detect fake news by suitable links to Wikipedia articles. YouTube announced a similar plan in 2018."
plot_wordcloud(word_list, title="Word Cloud")
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-50-0567281b72b3> in <module>()
---> 48 plot_wordcloud(word_list, title="Word Cloud")
<ipython-input-50-0567281b72b3> in plot_wordcloud(text, mask, max_words, max_font_size, figure_size, title, title_size, image_color)
18
19
---> 20 data = go.Scatter(dict(wordcloud.generate(str(text))),
21 mode='text',
22 text=words,
TypeError: 'WordCloud' object is not iterable
If I return wordcloud instead, all I get is <wordcloud.wordcloud.WordCloud at 0x1c8faeda748>. If anyone knows how to unpack the wordcloud object so that I can feed its x and y parameters into go.Figure, that would be fine too (better, actually).
Just to show that unpacking the wordcloud object should work in principle: I can plot a word cloud in plotly by putting random numbers in place of the x and y values in go.Scatter, like this:
import random
import plotly.graph_objs as go
def plot_wordcloud(text, mask=None, max_words=200, max_font_size=100, figure_size=(24.0, 16.0),
                   title=None, title_size=40, image_color=False):
    stopwords = set(STOPWORDS)
    wordcloud = WordCloud(background_color='black',
                          stopwords=stopwords,
                          max_words=max_words,
                          max_font_size=max_font_size,
                          random_state=42,
                          width=800,
                          height=400,
                          mask=mask)
    wordcloud.generate(str(text))
    # note: weights and colors must be defined elsewhere (one size / color per word)
    data = go.Scatter(x=[random.random() for i in range(3000)],
                      y=[random.random() for i in range(3000)],
                      mode='text',
                      text=str(word_list).split(),
                      marker={'opacity': 0.3},
                      textfont={'size': weights,
                                'color': colors})
    layout = go.Layout({'xaxis': {'showgrid': False, 'showticklabels': False, 'zeroline': False},
                        'yaxis': {'showgrid': False, 'showticklabels': False, 'zeroline': False}})
    fig = go.Figure(data=[data], layout=layout)
    return fig
It is not the correct word cloud, of course, since the positions and sizes of the words are not properly defined; it should look like this (the static word cloud plotted with matplotlib.pyplot):
Answer (score: 1):
Since wordcloud produces an image, and plotly's conversion function currently cannot handle images, you will need to regenerate the word cloud somehow from the positions, sizes and orientations stored in the wordcloud.wordcloud.WordCloud object.
This information is stored in its .layout_ attribute:
wc = WordCloud(...)
wc.generate(text)
print(wc.layout_)
This prints a list of tuples of the form
[(word, freq), fontsize, position, orientation, color]
e.g. in this case
[(('Wikipedia', 1.0), 100, (8, 7), None, 'rgb(56, 89, 140)'),
(('articles', 0.4444444444444444), 72, (269, 310), None, 'rgb(58, 186, 118)'), ...]
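For illustration, here is a minimal sketch (assuming wc has been generated as above) of pulling these entries apart into plain lists - essentially the unpacking asked about in the question:
# sketch: split wc.layout_ into separate lists (assumes wc.generate(...) has been called)
words, freqs, sizes, positions, colors = [], [], [], [], []
for (word, freq), fontsize, position, orientation, color in wc.layout_:
    words.append(word)
    freqs.append(freq)          # relative frequency, between 0 and 1
    sizes.append(fontsize)
    positions.append(position)  # (row, col) in image pixels, see the example below
    colors.append(color)        # e.g. 'rgb(56, 89, 140)'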
So in principle this allows regenerating the word cloud as text. One has to take care of some small details though, namely that the font and the font size need to be the same.
Here is a pure matplotlib example which uses matplotlib.text.Text objects to reproduce the word cloud.
import numpy as np
from wordcloud import WordCloud, STOPWORDS
from wordcloud.wordcloud import FONT_PATH
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
word_list = "Wikipedia was launched on January 15, 2001, by Jimmy Wales and Larry Sanger.[10] Sanger coined its name,[11][12] as a portmanteau of wiki[notes 3] and 'encyclopedia'. Initially an English-language encyclopedia, versions in other languages were quickly developed. With 5,748,461 articles,[notes 4] the English Wikipedia is the largest of the more than 290 Wikipedia encyclopedias. Overall, Wikipedia comprises more than 40 million articles in 301 different languages[14] and by February 2014 it had reached 18 billion page views and nearly 500 million unique visitors per month.[15] In 2005, Nature published a peer review comparing 42 science articles from Encyclopædia Britannica and Wikipedia and found that Wikipedia's level of accuracy approached that of Britannica.[16] Time magazine stated that the open-door policy of allowing anyone to edit had made Wikipedia the biggest and possibly the best encyclopedia in the world and it was testament to the vision of Jimmy Wales.[17] Wikipedia has been criticized for exhibiting systemic bias, for presenting a mixture of 'truths, half truths, and some falsehoods',[18] and for being subject to manipulation and spin in controversial topics.[19] In 2017, Facebook announced that it would help readers detect fake news by suitable links to Wikipedia articles. YouTube announced a similar plan in 2018."
def get_wordcloud(width, height):
    wc = WordCloud(background_color='black',
                   stopwords=set(STOPWORDS),
                   max_words=200,
                   max_font_size=100,
                   random_state=42,
                   width=int(width),
                   height=int(height),
                   mask=None)
    wc.generate(word_list)
    return wc

fig, (ax, ax2) = plt.subplots(nrows=2, sharex=True, sharey=True)

fp = FontProperties(fname=FONT_PATH)

bbox = ax.get_position().transformed(fig.transFigure)
wc = get_wordcloud(bbox.width, bbox.height)
ax.imshow(wc)

ax2.set_facecolor("black")
for (word, freq), fontsize, position, orientation, color in wc.layout_:
    color = np.array(color[4:-1].split(", ")).astype(float)/255.
    x, y = position
    rot = {None: 0, 2: 90}[orientation]
    fp.set_size(fontsize*72./fig.dpi)
    ax2.text(y, x, word, va="top", ha="left", color=color, rotation=rot,
             fontproperties=fp)

print(wc.layout_)
plt.show()
The upper plot is the wordcloud image shown via imshow, the lower plot is the regenerated word cloud.
Now you would probably want to do the same with plotly instead of matplotlib, but I do not know plotly well enough to give a direct solution for that here.
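As a rough starting point though, a sketch along these lines could feed the .layout_ information into a text-mode go.Scatter. It assumes a generated WordCloud instance wc as above; it is untested, plotly renders text with its own fonts so sizes and positions will only be approximate, and rotated words are not handled.
import plotly.graph_objs as go

def plotly_wordcloud(wc):
    # each wc.layout_ entry is ((word, freq), fontsize, (row, col), orientation, color)
    words, xs, ys, sizes, colors = [], [], [], [], []
    for (word, freq), fontsize, (row, col), orientation, color in wc.layout_:
        words.append(word)
        xs.append(col)                # horizontal pixel position
        ys.append(wc.height - row)    # flip rows so the origin matches plotly's bottom-left
        sizes.append(fontsize)        # wordcloud pixel size reused as font points (approximate)
        colors.append(color)          # 'rgb(r, g, b)' strings are valid plotly colors
    trace = go.Scatter(x=xs, y=ys, mode='text', text=words, hoverinfo='text',
                       textfont={'size': sizes, 'color': colors})
    layout = go.Layout(
        xaxis={'showgrid': False, 'showticklabels': False, 'zeroline': False},
        yaxis={'showgrid': False, 'showticklabels': False, 'zeroline': False},
        plot_bgcolor='black')
    return go.Figure(data=[trace], layout=layout)

# usage, with wc generated as in the matplotlib example:
# iplot(plotly_wordcloud(wc))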