From https://tour.golang.org/concurrency/10

Description:
In this exercise you'll use Go's concurrency features to parallelize a web crawler.
Modify the Crawl function to fetch URLs in parallel without fetching the same URL twice.
Hint: you can keep a cache of the URLs that have been fetched on a map, but maps alone are not safe for concurrent use!
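For context, the standard way to make such a cache safe is to guard the map with a sync.Mutex. A minimal sketch of the idea (the markVisited helper is hypothetical, not part of the tour):

package main

import (
	"fmt"
	"sync"
)

var (
	visitedMu sync.Mutex
	visited   = make(map[string]bool)
)

// markVisited reports whether url was already recorded, adding it if
// not; the mutex makes the check-and-set atomic across goroutines.
func markVisited(url string) bool {
	visitedMu.Lock()
	defer visitedMu.Unlock()
	if visited[url] {
		return true
	}
	visited[url] = true
	return false
}

func main() {
	fmt.Println(markVisited("https://golang.org/")) // false: first visit
	fmt.Println(markVisited("https://golang.org/")) // true: already seen
}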
Here is my answer:
package main

import (
	"fmt"
	"sync"
)

type Fetcher interface {
	// Fetch returns the body of URL and
	// a slice of URLs found on that page.
	Fetch(url string) (body string, urls []string, err error)
}

var crawledURLs = make(map[string]bool)
var mux sync.Mutex

func CrawlURL(url string, depth int, fetcher Fetcher, quit chan bool) {
	defer func() { quit <- true }()
	if depth <= 0 {
		return
	}
	mux.Lock()
	_, isCrawled := crawledURLs[url]
	if isCrawled {
		return
	}
	crawledURLs[url] = true
	mux.Unlock()

	body, urls, err := fetcher.Fetch(url)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("found: %s %q\n", url, body)

	quitThis := make(chan bool)
	for _, u := range urls {
		go CrawlURL(u, depth-1, fetcher, quitThis)
	}
	for range urls {
		<-quitThis
	}
	return
}

// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
func Crawl(url string, depth int, fetcher Fetcher) {
	CrawlURL(url, depth, fetcher, make(chan bool))
	return
}

func main() {
	Crawl("https://golang.org/", 4, fetcher)
}

// fakeFetcher is Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult

type fakeResult struct {
	body string
	urls []string
}

func (f fakeFetcher) Fetch(url string) (string, []string, error) {
	if res, ok := f[url]; ok {
		return res.body, res.urls, nil
	}
	return "", nil, fmt.Errorf("not found: %s", url)
}

// fetcher is a populated fakeFetcher.
var fetcher = fakeFetcher{
	"https://golang.org/": &fakeResult{
		"The Go Programming Language",
		[]string{
			"https://golang.org/pkg/",
			"https://golang.org/cmd/",
		},
	},
	"https://golang.org/pkg/": &fakeResult{
		"Packages",
		[]string{
			"https://golang.org/",
			"https://golang.org/cmd/",
			"https://golang.org/pkg/fmt/",
			"https://golang.org/pkg/os/",
		},
	},
	"https://golang.org/pkg/fmt/": &fakeResult{
		"Package fmt",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
	"https://golang.org/pkg/os/": &fakeResult{
		"Package os",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
}
Output:
found: https://golang.org/ "The Go Programming Language"
not found: https://golang.org/cmd/
found: https://golang.org/pkg/ "Packages"
found: https://golang.org/pkg/os/ "Package os"
fatal error: all goroutines are asleep - deadlock!
I'd like to know why the deadlock happens. Am I using channels the wrong way?

Then I noticed that I forgot to release the mutex in the if isCrawled {} branch, so I edited the code like this:
...
if isCrawled {
	mux.Unlock() // added this line
	return
}
...
But the deadlock still happens, and the output is different:
found: https://golang.org/ "The Go Programming Language"
not found: https://golang.org/cmd/
found: https://golang.org/pkg/ "Packages"
found: https://golang.org/pkg/os/ "Package os"
found: https://golang.org/pkg/fmt/ "Package fmt"
fatal error: all goroutines are asleep - deadlock!
Answer (score: 1):
The main problem is that you return from the if isCrawled {} branch without releasing the mutex.

Besides that, if you really just need to synchronize goroutines, I'd suggest using the sync API; channels work better for communicating and sharing data.

Here is the solution with sync.WaitGroup: https://play.golang.org/p/slrnmr3sPrs
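A sketch of what the WaitGroup version looks like (an illustration of the approach, not necessarily the exact playground code; crawlWithWG is a hypothetical name, and it reuses the Fetcher, crawledURLs, and mux declarations from the code above):

func crawlWithWG(url string, depth int, fetcher Fetcher, wg *sync.WaitGroup) {
	defer wg.Done()
	if depth <= 0 {
		return
	}
	mux.Lock()
	if crawledURLs[url] {
		mux.Unlock()
		return
	}
	crawledURLs[url] = true
	mux.Unlock()
	body, urls, err := fetcher.Fetch(url)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("found: %s %q\n", url, body)
	for _, u := range urls {
		wg.Add(1) // register each child before it starts
		go crawlWithWG(u, depth-1, fetcher, wg)
	}
}

func Crawl(url string, depth int, fetcher Fetcher) {
	var wg sync.WaitGroup
	wg.Add(1)
	crawlWithWG(url, depth, fetcher, &wg)
	wg.Wait() // blocks until every crawl goroutine has called Done
}

Because each Add happens before the corresponding goroutine is spawned, the counter can never drop to zero while work is still pending.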
And here is the solution using only channels: https://play.golang.org/p/FbPXxPSXvFL
The problem is that in the very first call to CrawlURL(), nobody is receiving from the channel you pass as an argument. So as soon as that function tries to send on it via defer func() { quit <- true }(), it blocks forever and never returns.
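You can reproduce that blocked send in isolation. A minimal standalone example (not from the original post):

package main

// Sending on an unbuffered channel blocks until another goroutine
// receives from it; with no receiver, the runtime detects that all
// goroutines are stuck and aborts with the same fatal error.
func main() {
	quit := make(chan bool) // unbuffered
	quit <- true            // no receiver: blocks forever
}

With that in mind, here is the fixed version of the full program: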
package main

import (
	"fmt"
	"sync"
)

type Fetcher interface {
	// Fetch returns the body of URL and
	// a slice of URLs found on that page.
	Fetch(url string) (body string, urls []string, err error)
}

var crawledURLs = make(map[string]bool)
var mux sync.Mutex

func CrawlURL(url string, depth int, fetcher Fetcher, quit chan bool) {
	// For the very first function instance, this would block forever
	// if nobody were receiving from the other end of this channel.
	defer func() { quit <- true }()
	if depth <= 0 {
		return
	}
	mux.Lock()
	_, isCrawled := crawledURLs[url]
	if isCrawled {
		mux.Unlock()
		return
	}
	crawledURLs[url] = true
	mux.Unlock()

	body, urls, err := fetcher.Fetch(url)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("found: %s %q\n", url, body)

	quitThis := make(chan bool)
	for _, u := range urls {
		go CrawlURL(u, depth-1, fetcher, quitThis)
	}
	for range urls {
		<-quitThis
	}
	return
}

// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
func Crawl(url string, depth int, fetcher Fetcher) {
	lastQuit := make(chan bool)
	go CrawlURL(url, depth, fetcher, lastQuit)
	// You need to receive from this channel in order to
	// unblock the called function.
	<-lastQuit
	return
}

func main() {
	Crawl("https://golang.org/", 10, fetcher)
}

// fakeFetcher is Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult

type fakeResult struct {
	body string
	urls []string
}

func (f fakeFetcher) Fetch(url string) (string, []string, error) {
	if res, ok := f[url]; ok {
		return res.body, res.urls, nil
	}
	return "", nil, fmt.Errorf("not found: %s", url)
}

// fetcher is a populated fakeFetcher.
var fetcher = fakeFetcher{
	"https://golang.org/": &fakeResult{
		"The Go Programming Language",
		[]string{
			"https://golang.org/pkg/",
			"https://golang.org/cmd/",
		},
	},
	"https://golang.org/pkg/": &fakeResult{
		"Packages",
		[]string{
			"https://golang.org/",
			"https://golang.org/cmd/",
			"https://golang.org/pkg/fmt/",
			"https://golang.org/pkg/os/",
		},
	},
	"https://golang.org/pkg/fmt/": &fakeResult{
		"Package fmt",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
	"https://golang.org/pkg/os/": &fakeResult{
		"Package os",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
}
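An alternative fix (a variation not shown in the original answer): keep the synchronous call in Crawl and give the channel a one-slot buffer, so the top-level deferred send lands in the buffer instead of blocking on a missing receiver.

func Crawl(url string, depth int, fetcher Fetcher) {
	// Buffered channel: by the time the synchronous CrawlURL call
	// returns, all recursion is done, and its deferred send simply
	// fills the buffer instead of waiting for a receiver.
	CrawlURL(url, depth, fetcher, make(chan bool, 1))
}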