Go Tour Webcrawler Exercise - Solution

Date: 2016-04-07 14:45:04

Tags: go concurrency

I am new to Go, and as part of my studies I have to give a presentation on concurrency in Go. I thought the Go Tour Webcrawler exercise would be a good example for this. Before I do, it would be great if someone could verify whether this solution is suitable. I believe it is correct, but maybe I have missed something, or one of you has a better alternative.

Here is my code:

package main

import (
    "fmt"
    "sync"
    "strconv"
    "time"
)

/*
 * Data and Types
 * ===================================================================================
 */
var fetched map[string]bool // tracks URLs that have already been fetched
var lock sync.Mutex         // guards all access to the fetched map
var urlChan chan string     // receives every successfully fetched URL

type Fetcher interface {
    // Fetch returns the body of URL and
    // a slice of URLs found on that page.
    Fetch(url string) (body string, urls []string, err error)
}

// fakeFetcher is a Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult

type fakeResult struct {
    body string
    urls []string
}

// fetcher is a populated fakeFetcher.
var fetcher = fakeFetcher{
    "http://golang.org/": &fakeResult{
        "The Go Programming Language",
        []string{
            "http://golang.org/pkg/",
            "http://golang.org/cmd/",
        },
    },
    "http://golang.org/pkg/": &fakeResult{
        "Packages",
        []string{
            "http://golang.org/",
            "http://golang.org/cmd/",
            "http://golang.org/pkg/fmt/",
            "http://golang.org/pkg/os/",
        },
    },
    "http://golang.org/pkg/fmt/": &fakeResult{
        "Package fmt",
        []string{
            "http://golang.org/",
            "http://golang.org/pkg/",
        },
    },
    "http://golang.org/pkg/os/": &fakeResult{
        "Package os",
        []string{
            "http://golang.org/",
            "http://golang.org/pkg/",
        },
    },
}
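
// Note: the entries above link to one another, so a crawl starting at
// http://golang.org/ can reach all four pages. http://golang.org/cmd/ is
// referenced but has no entry, so fetching it fails with "not found".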
/*
 * End Data and Types
 * ===================================================================================
 */

/*
 * Webcrawler implementation
 * ===================================================================================
 */
// waitUntilDone blocks until d URLs have arrived on urlChan, recording the
// elapsed time for each one.
func waitUntilDone(d int) {

    start := time.Now()
    fMap := make(map[string]string)

    // Fetch sends a URL on urlChan each time a page is fetched successfully.
    for i := 0; i < d; i++ {
        fMap[<-urlChan] = time.Since(start).String()
    }

    // Give the remaining goroutines a moment to print their
    // "already fetched" / "not found" messages before the stats.
    time.Sleep(time.Millisecond * 100)

    fmt.Println()
    fmt.Println("Fetch stats")
    fmt.Println("==================================================================")
    for k, v := range fMap {
        fmt.Println("Fetched: " + k + " after: " + v + " ns")
    }
    fmt.Println("==================================================================")
    fmt.Println()

}

func (f fakeFetcher) Fetch(url string) (string, []string, error) {
    var str string
    var strArr []string
    var err error

    // Read the fetched map under the mutex: Crawl writes to it concurrently.
    // There is still a window between this check and Crawl marking the URL,
    // so in principle a URL could be fetched twice.
    lock.Lock()
    alreadyFetched := fetched[url]
    lock.Unlock()

    if alreadyFetched {
        str, strArr, err = "", nil, fmt.Errorf("already fetched: %s, this will be ignored", url)

    } else if res, ok := f[url]; ok {
        str, strArr, err = res.body, res.urls, nil
        urlChan <- url

    } else {
        str, strArr, err = "", nil, fmt.Errorf("not found: %s", url)
    }

    return str, strArr, err
}

// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
func Crawl(url string, depth int, fetcher Fetcher, goRoutNum int) {

    if depth <= 0 {
        return
    }

    // Fetch this url synchronously within the current goroutine; the loop
    // below fans the discovered URLs out into new goroutines.
    fmt.Println("Goroutine " + strconv.Itoa(goRoutNum) + " is fetching: " + url)
    body, urls, err := fetcher.Fetch(url)

    if err != nil {
        fmt.Println(err)
        return
    }

    // Mark the URL as fetched (the mutex guards the map).
    lock.Lock()
    fetched[url] = true
    lock.Unlock()

    fmt.Printf("found: %s %q\n", url, body)

    // Crawl each discovered URL in its own goroutine.
    for i, u := range urls {
        go func(url string, goRoutNumber int) {
            Crawl(url, depth-1, fetcher, goRoutNumber)
        }(u, i+1)
    }
}

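// StartCrawling launches the first Crawl (goroutine number 0) in its own
// goroutine; main then waits for the results in waitUntilDone.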
func StartCrawling(url string, depth int, fetcher Fetcher) {

    fmt.Println()
    fmt.Println("Start crawling ...")
    fmt.Println("==================================================================")

    go func(u string, i int, f Fetcher) {
        Crawl(u, i, f, 0)
    }(url, depth, fetcher)
}
/*
 * End Webcrawler implementation
 * ===================================================================================
 */


/*
 * Main
 * ====================================================================
 */
func main() {

    depth := len(fetcher) // recursion depth; also the number of pages the fake fetcher knows
    fetched = make(map[string]bool)
    url := "http://golang.org/"
    urlChan = make(chan string, len(fetcher)) // one buffer slot per page, so sends in Fetch do not block
    go StartCrawling(url, depth, fetcher)
    waitUntilDone(depth)

}

/*
 * End Main
 * =====================================================================
 */
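
Regarding alternatives: the part I am least sure about is signalling completion by counting fetched URLs on a buffered channel. For comparison, below is a minimal standalone sketch of the same exercise that waits with sync.WaitGroup instead. This is only a sketch, not my solution above: it tracks visited URLs inside Crawl with an atomic check-and-mark (so its Fetch just looks up canned results), and the fetcher is shortened to two pages for brevity.

package main

import (
    "fmt"
    "sync"
)

type Fetcher interface {
    Fetch(url string) (body string, urls []string, err error)
}

type fakeFetcher map[string]*fakeResult

type fakeResult struct {
    body string
    urls []string
}

func (f fakeFetcher) Fetch(url string) (string, []string, error) {
    if res, ok := f[url]; ok {
        return res.body, res.urls, nil
    }
    return "", nil, fmt.Errorf("not found: %s", url)
}

// Two pages that link to each other; a shortened stand-in for the
// four-page fetcher in the solution above.
var fetcher = fakeFetcher{
    "http://golang.org/": &fakeResult{
        "The Go Programming Language",
        []string{"http://golang.org/pkg/"},
    },
    "http://golang.org/pkg/": &fakeResult{
        "Packages",
        []string{"http://golang.org/"},
    },
}

// Crawl checks and marks each URL atomically under the mutex, so every
// page is fetched at most once, then fans out into one goroutine per
// child URL. Each goroutine is registered in the WaitGroup before it
// starts and deregisters itself when it returns.
func Crawl(url string, depth int, fetcher Fetcher, fetched map[string]bool, mu *sync.Mutex, wg *sync.WaitGroup) {
    defer wg.Done()

    if depth <= 0 {
        return
    }

    // Atomic check-and-mark: no window in which two goroutines can
    // both decide to fetch the same URL.
    mu.Lock()
    if fetched[url] {
        mu.Unlock()
        return
    }
    fetched[url] = true
    mu.Unlock()

    body, urls, err := fetcher.Fetch(url)
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Printf("found: %s %q\n", url, body)

    for _, u := range urls {
        wg.Add(1)
        go Crawl(u, depth-1, fetcher, fetched, mu, wg)
    }
}

func main() {
    var mu sync.Mutex
    var wg sync.WaitGroup
    fetched := make(map[string]bool)

    wg.Add(1)
    go Crawl("http://golang.org/", 4, fetcher, fetched, &mu, &wg)

    // Returns exactly when the last goroutine finishes, with no sleep
    // and no counting of expected results.
    wg.Wait()
}

The trade-off is that the channel version above also collects per-URL timing for the stats output, which this WaitGroup variant does not.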

Playground: https://play.golang.org/p/GHHt5I162o
Exercise link: https://tour.golang.org/concurrency/10

0 Answers:

No answers yet.