我正在尝试将列表的每个元素写在不同的文件中。
假设我们有一个清单:
dataset = ['abc', 'def', 'ghi']
我想遍历列表并根据列表的长度创建文本文件。因此,在这种情况下,应该有3个文本文件,每个文件将分别包含内容abc,def和ghi。
我目前的代码如下:
# This will read a text file, normalize it and remove stopwords from it using nltk.
import nltk, io, math
from nltk.corpus import stopwords
# Read raw text
targetFile = open('text.txt')
rawtext = targetFile.read()
# Removing stopwords
stops = set(stopwords.words('english'))
filtered_text = [i for i in rawtext.lower().split() if i not in stops]
# Count Number of words
total_words = len(filtered_text)
# Divide them equally into 10 different lists
chunk_size = math.floor(total_words/10)
n_lists_of_words = [filtered_text[i:i + chunk_size] for i in range(0, len(filtered_text), chunk_size)]
if(len(n_lists_of_words) > 10):
del n_lists_of_words[-1]
# Lets make list of strings instead of list of lists
list_of_str = [' '.join(x) for x in n_lists_of_words]
# Create 10 different files from above 10 elements of n_list_of_words list
for index, word in enumerate(n_lists_of_words):
with io.FileIO("output_text_" + str(index) + ".txt", "w") as file:
file.write(bytes(word), 'UTF-8')
错误讯息:
Traceback (most recent call last):
File "clean_my_text.py", line 35, in <module>
file.write(bytes(word), 'UTF-8')
TypeError: 'str' object cannot be interpreted as an integer
答案 0 :(得分:1)
你的代码有点不对劲。 这是最后一行更正。 file.write(bytes(dataset [count],&#39; UTF-8&#39;))
答案 1 :(得分:0)
谢谢大家。能做到这一点。以下是以下解决方案,请随时询问相关信息:
# This will read a text file, normalize it and remove stopwords from it using nltk.
import nltk, io, math
from nltk.corpus import stopwords
from string import punctuation
# Read raw text
targetFile = open('input_text.txt')
rawtext = targetFile.read()
# Remove punctuation
def strip_punctuation(s):
return ''.join(c for c in s if c not in punctuation)
filtered_punc = strip_punctuation(rawtext)
print(filtered_punc)
# Removing stopwords
stops = set(stopwords.words('english'))
filtered_text = [i for i in filtered_punc.lower().split() if i not in stops]
# Count Number of words
total_words = len(filtered_text)
# Divide them equally into 10 different lists
chunk_size = math.floor(total_words/10)
n_lists_of_words = [filtered_text[i:i + chunk_size] for i in range(0, len(filtered_text), chunk_size)]
if(len(n_lists_of_words) > 10):
del n_lists_of_words[-1]
# Lets make list of strings instead of list of lists
list_of_str = [' '.join(x) for x in n_lists_of_words]
# Print list values in seperate files
for index, word in enumerate(list_of_str):
with open("Output" + str(index) + ".txt", "w") as text_file:
print(word, file=text_file)