Preprocessing a pandas DataFrame in Python

Time: 2019-10-06 02:46:22

Tags: python function dataframe text

I am trying to write a preprocessing function so that I can simply pass in a text corpus and get the cleaned text back:

from contractions import CONTRACTION_MAP
import re
import string
import nltk
from nltk.corpus import wordnet          # not used yet
from pattern.en import suggest           # not used yet
from nltk.stem import PorterStemmer      # not used yet


def data_cleaning(text):
    # Split the text into sentences, then into word tokens
    punkt_token = nltk.PunktSentenceTokenizer()
    sentences = punkt_token.tokenize(text)
    word_tokens = [nltk.word_tokenize(sentence) for sentence in sentences]

    # Removing special characters
    pattern = re.compile("[{}]".format(re.escape(string.punctuation)))
    filtered = filter(None, [pattern.sub('', token)
                             for tokens in word_tokens for token in tokens])
    output = " ".join(filtered)
    return output


def expand_contractions(text):
    pattern = re.compile("({})".format("|".join(CONTRACTION_MAP.keys())),
                         flags=re.DOTALL | re.IGNORECASE)

    def replace_text(t):
        txt = t.group(0)
        if txt.lower() in CONTRACTION_MAP:
            return CONTRACTION_MAP[txt.lower()]
        return txt  # leave the match unchanged if it is not in the map

    return pattern.sub(replace_text, text)


# `corpus` is my list of raw document strings
corpus = [expand_contractions(c) for c in corpus]
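
What I am after is something I can call on a DataFrame column, roughly like the sketch below (the column name "text", the small inline CONTRACTION_MAP stand-in, and the preprocess_text name are just placeholders; in the real code CONTRACTION_MAP comes from my own contractions module):

import re
import string

import pandas as pd
import nltk

# nltk.download('punkt')  # run once if the tokenizer models are missing

# Stand-in for contractions.CONTRACTION_MAP, just so the sketch runs on its own:
# from contractions import CONTRACTION_MAP
CONTRACTION_MAP = {"can't": "cannot", "it's": "it is", "won't": "will not"}

PUNCT_PATTERN = re.compile("[{}]".format(re.escape(string.punctuation)))
CONTRACTION_PATTERN = re.compile(
    "({})".format("|".join(map(re.escape, CONTRACTION_MAP.keys()))),
    flags=re.DOTALL | re.IGNORECASE)


def expand_contractions(text):
    # Replace each matched contraction with its expansion; keep unknown matches as-is
    return CONTRACTION_PATTERN.sub(
        lambda m: CONTRACTION_MAP.get(m.group(0).lower(), m.group(0)), text)


def preprocess_text(text):
    # One raw document in, one cleaned document out
    text = expand_contractions(text)           # expand before stripping apostrophes
    text = PUNCT_PATTERN.sub("", text)         # remove special characters
    tokens = nltk.word_tokenize(text.lower())  # tokenize and lowercase
    return " ".join(tokens)


df = pd.DataFrame({"text": ["I can't believe it's working!", "Hello, world."]})
df["clean_text"] = df["text"].apply(preprocess_text)
print(df["clean_text"].tolist())
# ['i cannot believe it is working', 'hello world']

That is roughly the behaviour I want: one call per document via DataFrame.apply, with all the cleaning steps kept inside a single function.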

0 Answers:

There are no answers yet.