TensorFlow crashing the RAM on Google Colab

Date: 2020-02-21 18:40:51

Tags: python memory-management tokenize tensorflow2.0

I am working on word embeddings for an Arabic dialect (i.e., the language of a particular region). While preprocessing the data, I:

  1. Load the JSON
  2. Extract the lines
  3. Clean out URLs, emojis, and other noise
  4. Drop any entry with fewer than 2 words
  5. Create context/target pairs with a window of 2
  6. Fit tf.keras.preprocessing.text.Tokenizer with fit_on_texts
    1. The problem: texts_to_matrix

Step 7 is meant to produce the one-hot encoding so the data can be fed to the network.
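For context on why that step explodes: Tokenizer.texts_to_matrix builds one dense float64 row per text, with vocab_size columns. A back-of-the-envelope sketch (the counts here are hypothetical placeholders, not my real data):

# Hypothetical sizes, just to show the scale of the problem.
num_texts = 2_000_000          # e.g. len(X) after the context/target duplication
vocab_size = 100_000           # len(tokenizer.word_counts) + 1

# texts_to_matrix allocates a dense float64 array of shape (num_texts, vocab_size):
print(num_texts * vocab_size * 8 / 1024**3, 'GiB')   # ~1490 GiB, vs ~12 GiB of Colab RAM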

def loadJson(file):
  import json
  lines=[]
  with open(file) as f:
    for line in f:                           # one JSON object per line (JSON Lines)
      lines.append(json.loads(line))
  return lines

def extractSentences(lines,language):
  posts=[]
  comments=[]
  for line in lines:
    if line['language']==language:
      posts.append(line['message'])
    for j in line['comments']:               # comments are scanned regardless of the post's language
      if j['language']==language:
        comments.append(j['message'])
  return posts, comments
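For reference, each line of the input file is expected to be a standalone JSON object shaped roughly like this (field names taken from the code above; the values are made up):

{"language": "ARABIC", "message": "...", "comments": [{"language": "ARABIC", "message": "..."}]}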

def removeSpecialChar(posts):
  import re
  def remov(p):
    # drop the short-vowel diacritics, replace every non-Arabic character with a
    # space, then collapse runs of spaces
    l=re.sub(' {2,}',' ',re.sub('[^ـابتةثجحخدذرزىسشصضطظعغفقكلمنهويءآأؤإئّ:]',' ',re.sub('َ|ً|ُ|ٌ|ِ|ٍ|ْ','',p.strip())))
    # strip emoji and pictographic symbols
    myre = re.compile(u'['
    u'\U0001F300-\U0001F64F'
    u'\U0001F680-\U0001F6FF'
    u'\u2600-\u26FF\u2700-\u27BF]+',
    re.UNICODE)
    return myre.sub('',l)
  return list(map(remov,posts))
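A quick sanity check of the cleaner on a made-up sample string:

print(removeSpecialChar(['مرحبا 😀 بالعالم!']))
# the emoji and '!' become spaces and runs of spaces collapse,
# leaving roughly ['مرحبا بالعالم']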

def delEmpty(posts,size=2):
  # filter in place: keep only posts with at least `size` words
  posts[:] = [p for p in posts if len(p.split(' '))>=size]

def contextAndTarget(posts,k=2):
  context = []
  target  = []
  for i in posts:
    ul = [w for w in i.split(' ') if len(w)>2]   # keep words longer than 2 characters
    for handel in range(len(ul)-1):
      for e in range(k):                         # window of k words to the right (e==0 pairs a word with itself)
        if e+handel<len(ul):
          context.append(ul[handel])
          target.append(ul[e+handel])
  # duplicate the pairs in both directions so (context, target) and (target, context) both appear
  X = target + context
  Y = context + target
  return X,Y
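To see what the pairing produces, a tiny made-up example (English words only so the output is easy to read):

X, Y = contextAndTarget(['the quick brown fox'])
print(list(zip(X, Y)))
# every word is paired with itself and its right-hand neighbour, in both directions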

After that, I apply the processing to the json file and run all of the steps:

from tensorflow.keras import preprocessing

lines=loadJson('data.json')
posts,comments=extractSentences(lines,'ARABIC')
posts=removeSpecialChar(posts)
delEmpty(posts)
X,Y=contextAndTarget(posts)

tokenPosts=preprocessing.text.Tokenizer()
tokenPosts.fit_on_texts(X)
vocab_size=len(tokenPosts.word_counts)+1

# right here it crashes and the RAM usage suddenly spikes

xLines,yLines=tokenPosts.texts_to_matrix(X),tokenPosts.texts_to_matrix(Y)
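A sketch of one way around the dense matrix, assuming the model can consume one-hot batches (the names xSeq, ySeq, and batches are mine, not from any library): keep the texts as integer sequences and one-hot encode lazily, batch by batch.

from tensorflow.keras.utils import to_categorical

xSeq = tokenPosts.texts_to_sequences(X)   # one word index per entry instead of a dense row
ySeq = tokenPosts.texts_to_sequences(Y)

def batches(xs, ys, batch_size=1024):
  # yield small one-hot blocks instead of materialising the full (len(X), vocab_size) matrix
  for s in range(0, len(xs), batch_size):
    pairs = [(a[0], b[0]) for a, b in zip(xs[s:s+batch_size], ys[s:s+batch_size]) if a and b]
    if not pairs:
      continue
    xb, yb = zip(*pairs)
    yield (to_categorical(list(xb), num_classes=vocab_size),
           to_categorical(list(yb), num_classes=vocab_size))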

0 Answers:

No answers yet.