计算数据框每一行中的关键字

时间:2019-04-29 14:51:37

标签: python regex pandas for-loop

我想计算给定数据框列中每行中列表中每个关键字的存在总数。

# Sample data: each cell holds free text that will be scanned for keywords.
d = {
    'Column_1': ['mango pret Orange No manner', ' préts No scan '],
    'Column_2': ['read priority No', 'This is a priority noir '],
    'Column_3': ['No add', 'yep']
}

df = pd.DataFrame(data=d)

# Keyword patterns (literal words and regexes) whose occurrences are counted.
list_1 = ['Apple', 'Mango', 'Orange', 'pr[éeêè]t[s]?']
list_2 = ['weather', 'r[ea]d', 'p[wr]iority', 'noir?']
list_3 = ['n[eéè]d', 'snow[s]?', 'blanc?']

# Maps each result-column name to the DataFrame column(s) to scan plus the
# keyword list to count.  Renamed from `dict`, which shadowed the builtin.
spec = {
    "s1": ['Column_1', list_1],
    "s2": ['Column_1', list_3],
    "s3": ['Column_2', list_2],
    "s4": ['Column_3', list_3],
    "s5": ['Column_2', 'Column_3', list_1]
}

for name, parts in spec.items():
    # Split the spec entry into column names (strings) and regex lists.
    columns = [p for p in parts if isinstance(p, str)]
    regexes = [rgx for p in parts if isinstance(p, list) for rgx in p]
    # Build one lower-cased alternation and compile it once per key.
    pattern = re.compile('|'.join(regexes).lower())
    # Count matches cell by cell and sum across the selected columns.
    # The original called re.findall on str(df[col].str.lower()), which
    # stringifies the whole Series repr (index, truncation and all)
    # instead of the cell contents, then discarded the per-row counts.
    df[name] = sum(
        df[col].map(lambda cell: len(pattern.findall(str(cell).lower())))
        for col in columns
    )

我的 print(d) 的输出结果如下：

Counter({'mango': 1, 'pret': 1, 'orange': 1, 'préts': 1})

如何更改此代码以提供类似下面的数据框df2的信息

# Example of the desired output shape: one column per series key,
# one value per input row.
d2 = dict(s1=[3, 1], s3=[2, 1])
df2 = pd.DataFrame(d2)

1 个答案:

答案 0（得分：0）：

import pandas as pd
import re

# Input data: free-text cells to be scanned for keyword occurrences.
d = {
  'Column_1': [u'mango pret Orange No manner', u' préts No scan '],
  'Column_2': [u'read priority No', u'This is a priority noir '],
  'Column_3': [u'No add', u'yep']
}

df = pd.DataFrame(data=d)

# Keyword patterns (literal words and regexes).
list_1 = [u'Apple', u'Mango', u'Orange', u'pr[éeêè]t[s]?']
list_2 = [u'weather', u'r[ea]d', u'p[wr]iority', u'noir?']
list_3 = [u'n[eéè]d', u'snow[s]?', u'blanc?']

# Each entry: the column name(s) to scan followed by the keyword list.
my_dict = {
  "s1": ['Column_1', list_1],
  "s2": ['Column_1', list_3],
  "s3": ['Column_2', list_2],
  "s4": ['Column_3', list_3],
  "s5": ['Column_2', 'Column_3', list_1]
}

d2 = dict()
for key, lst in my_dict.items():
    # Distinguish columns from regex lists.  The original used filter(),
    # which on Python 3 returns a lazy iterator that df[...] cannot index;
    # list comprehensions work on both Python 2 and 3.
    col_names = [x for x in lst if isinstance(x, str)]
    regex_lists = [x for x in lst if isinstance(x, list)]
    # Flatten all regex lists into one (the original used reduce(),
    # which is no longer a builtin on Python 3).
    regex_list = [rgx for sub in regex_lists for rgx in sub]
    # Compile the lower-cased alternation once per key, not once per cell.
    pattern = re.compile(u'|'.join(regex_list).lower())

    def count_matches(cell):
        # Number of pattern hits in one cell's text.
        return len(pattern.findall(str(cell).lower()))

    # Apply the counter to every cell of the selected columns.
    # Series.map avoids DataFrame.applymap, deprecated in recent pandas.
    df_regex_count = df[col_names].apply(lambda s: s.map(count_matches))
    # Row-wise totals, materialized as a list: Python 3's map() is lazy
    # and pd.DataFrame cannot consume map objects.
    d2[key] = [sum(row) for row in df_regex_count.values.tolist()]

df2 = pd.DataFrame(data=d2)

输出:

    s1  s2  s3  s4  s5
0   3   0   1   0   0
1   1   0   2   0   0

请注意,s3给出[1,2]而不是[2,1],因为r[ea]d不捕获read,而noir?捕获noir