Given a countmap object that counts the words in a text:
using StatsBase  # provides countmap
vocab_counter = countmap(split("the lazy fox jumps over the brown dog"))
[OUT]:
Dict{SubString{String},Int64} with 7 entries:
"brown" => 1
"lazy" => 1
"jumps" => 1
"the" => 2
"fox" => 1
"over" => 1
"dog" => 1
From vocab_counter, I then build a character-bigram counter per word:
ngram_word_counter = Dict{Tuple,Dict}()
for (word, count) in vocab_counter
    for ng in ngrams(word, n)  # bigrams.
        if !haskey(ngram_word_counter, ng) || !haskey(ngram_word_counter[ng], word)
            ngram_word_counter[ng] = Dict{String,Int64}()
            ngram_word_counter[ng][word] = 0
        end
        ngram_word_counter[ng][word] += count
    end
end
[ngram_word_counter]:
Dict{Tuple,Dict} with 20 entries:
('b','r') => Dict("brown"=>1)
('t','h') => Dict("the"=>2)
('o','w') => Dict("brown"=>1)
('z','y') => Dict("lazy"=>1)
('o','g') => Dict("dog"=>1)
('u','m') => Dict("jumps"=>1)
('o','x') => Dict("fox"=>1)
('e','r') => Dict("over"=>1)
('a','z') => Dict("lazy"=>1)
('p','s') => Dict("jumps"=>1)
('h','e') => Dict("the"=>2)
('d','o') => Dict("dog"=>1)
('w','n') => Dict("brown"=>1)
('m','p') => Dict("jumps"=>1)
('l','a') => Dict("lazy"=>1)
('o','v') => Dict("over"=>1)
('v','e') => Dict("over"=>1)
('r','o') => Dict("brown"=>1)
('f','o') => Dict("fox"=>1)
('j','u') => Dict("jumps"=>1)
With this Dict{Tuple,Dict{String,Int64}} object, I need to loop over ngram_word_counter again to get an ngram_counter without the words, i.e. a Dict{Tuple,Int64}:
ngram_counter = Dict{Tuple,Int64}()
for ng in keys(ngram_word_counter)
    ngram_counter[ng] = sum(values(ngram_word_counter[ng]))
end
[ngram_counter]:
Dict{Tuple,Int64} with 20 entries:
('b','r') => 1
('t','h') => 2
('o','w') => 1
('z','y') => 1
('o','g') => 1
('u','m') => 1
('o','x') => 1
('e','r') => 1
('a','z') => 1
('p','s') => 1
('h','e') => 2
('d','o') => 1
('w','n') => 1
('m','p') => 1
('l','a') => 1
('o','v') => 1
('v','e') => 1
('r','o') => 1
('f','o') => 1
('j','u') => 1
Currently, to get both objects, I can do an ad-hoc second pass like this:
function compute_statistics(vocab_counter, n)
    ngram_word_counter = Dict{Tuple,Dict}()
    for (word, count) in vocab_counter
        for ng in ngrams(word, n)  # bigrams.
            if !haskey(ngram_word_counter, ng) || !haskey(ngram_word_counter[ng], word)
                ngram_word_counter[ng] = Dict{String,Int64}()
                ngram_word_counter[ng][word] = 0
            end
            ngram_word_counter[ng][word] += count
        end
    end
    ngram_counter = Dict{Tuple,Int64}()
    for ng in keys(ngram_word_counter)
        ngram_counter[ng] = sum(values(ngram_word_counter[ng]))
    end
    return ngram_word_counter, ngram_counter
end
Or update ngram_word_counter and ngram_counter together in the first loop:
function compute_statistics(vocab_counter, n)
    ngram_word_counter = Dict{Tuple,Dict}()
    ngram_counter = Dict{Tuple,Int64}()
    for (word, count) in vocab_counter
        for ng in ngrams(word, n)  # bigrams.
            if !haskey(ngram_word_counter, ng) || !haskey(ngram_word_counter[ng], word)
                ngram_word_counter[ng] = Dict{String,Int64}()
                ngram_word_counter[ng][word] = 0
            end
            ngram_word_counter[ng][word] += count
            ngram_counter[ng] += 1
        end
    end
    return ngram_word_counter, ngram_counter
end
ngram_word_counter, ngram_counter = compute_statistics(vocab_counter, 2)
But I get a KeyError when updating ngram_counter:
KeyError: key ('b','r') not found
I added an extra check and it works:
function compute_statistics(vocab_counter, n)
    ngram_word_counter = Dict{Tuple,Dict}()
    ngram_counter = Dict{Tuple,Int64}()
    for (word, count) in vocab_counter
        for ng in ngrams(word, n)  # bigrams.
            if !haskey(ngram_word_counter, ng) || !haskey(ngram_word_counter[ng], word)
                ngram_word_counter[ng] = Dict{String,Int64}()
                ngram_word_counter[ng][word] = 0
            end
            if !haskey(ngram_counter, ng)
                ngram_counter[ng] = 0
            end
            ngram_word_counter[ng][word] += count
            ngram_counter[ng] += 1
        end
    end
    return ngram_word_counter, ngram_counter
end
ngram_word_counter, ngram_counter = compute_statistics(vocab_counter, 2)
[OUT]:
(Dict{Tuple,Dict}(Pair{Tuple,Dict}(('b','r'),Dict("brown"=>1)),Pair{Tuple,Dict}(('t','h'),Dict("the"=>2)),Pair{Tuple,Dict}(('o','w'),Dict("brown"=>1)),Pair{Tuple,Dict}(('z','y'),Dict("lazy"=>1)),Pair{Tuple,Dict}(('o','g'),Dict("dog"=>1)),Pair{Tuple,Dict}(('u','m'),Dict("jumps"=>1)),Pair{Tuple,Dict}(('o','x'),Dict("fox"=>1)),Pair{Tuple,Dict}(('e','r'),Dict("over"=>1)),Pair{Tuple,Dict}(('a','z'),Dict("lazy"=>1)),Pair{Tuple,Dict}(('p','s'),Dict("jumps"=>1))…),Dict{Tuple,Int64}(Pair{Tuple,Int64}(('b','r'),1),Pair{Tuple,Int64}(('t','h'),1),Pair{Tuple,Int64}(('o','w'),1),Pair{Tuple,Int64}(('z','y'),1),Pair{Tuple,Int64}(('o','g'),1),Pair{Tuple,Int64}(('u','m'),1),Pair{Tuple,Int64}(('o','x'),1),Pair{Tuple,Int64}(('e','r'),1),Pair{Tuple,Int64}(('a','z'),1),Pair{Tuple,Int64}(('p','s'),1)…))
Is there a way to sum the inner dictionaries of the Dict{Tuple,Dict{String,Int64}} within the same single loop?
Answer (score: 4):
Not sure if this answers the question, but you can make compute_statistics cleaner like this:
function compute_statistics(vocab_counter, n)
    ngram_word_counter = Dict{Tuple,Dict{String,Int}}()
    ngram_counter = Dict{Tuple,Int}()
    for (word, count) in vocab_counter, ng in ngrams(word, n)
        ngram_word_counter[ng] = get(ngram_word_counter, ng, Dict{String,Int}())
        ngram_word_counter[ng][word] = get(ngram_word_counter[ng], word, 0) + count
        ngram_counter[ng] = get(ngram_counter, ng, 0) + count
    end
    return ngram_word_counter, ngram_counter
end
(This uses get to avoid haskey, together with the shorter one-line for syntax.)
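A closely related idiom, not used in the answer above, is get!, which inserts the default value into the dictionary on a miss, so the outer key does not have to be re-assigned on every iteration. A sketch under the same assumptions (including the hypothetical ngrams helper from earlier; the function name here is made up):
# Sketch only: same logic as the answer's version, but using get! for the nested Dict.
function compute_statistics_get(vocab_counter, n)
    ngram_word_counter = Dict{Tuple,Dict{String,Int}}()
    ngram_counter = Dict{Tuple,Int}()
    for (word, count) in vocab_counter, ng in ngrams(word, n)
        inner = get!(ngram_word_counter, ng, Dict{String,Int}())  # create inner dict on miss
        inner[word] = get(inner, word, 0) + count
        ngram_counter[ng] = get(ngram_counter, ng, 0) + count
    end
    return ngram_word_counter, ngram_counter
end
Either way, adding count (rather than 1) is what keeps ngram_counter equal to sum(values(ngram_word_counter[ng])), i.e. the same numbers the two-pass version produces.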
Another way to compute ngram_counter from ngram_word_counter is:
ngram_counter = map(x->x[1]=>sum(values(x[2])),ngram_word_counter)
or
ngram_counter = Dict(k=>sum(values(d)) for (k,d) in ngram_word_counter)
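For completeness, a small usage sketch of the answer's compute_statistics, assuming the hypothetical ngrams helper from earlier (countmap itself comes from StatsBase). Because the answer adds count rather than 1, the ('t','h') entry comes out as 2, in agreement with the two-pass version, whereas the single-loop version in the question reports 1 for it. Also note that map over a Dict is not supported in current Julia releases, so the Dict-comprehension form is the more portable of the two alternatives.
using StatsBase  # provides countmap

vocab_counter = countmap(split("the lazy fox jumps over the brown dog"))
ngram_word_counter, ngram_counter = compute_statistics(vocab_counter, 2)

ngram_counter[('t','h')]                    # 2, from the two occurrences of "the"
sum(values(ngram_word_counter[('t','h')]))  # also 2, so the two objects agree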