Web crawler stops after the first page

Asked: 2021-07-13 08:25:47

Tags: image go web-crawler goquery

I am working on a web crawler that should work like this:

  1. Go to a website and grab all the links from that site
  2. Download all images (starting from the start page)
  3. If there are no images on the current page, go to the next link found in step 1 and do steps 2 and 3 until there are no links/images left.

The code below seems to work to some extent: when I try to crawl some websites, it does download a few images.

(Even though I don't understand where the images I get come from, because I can't find them on the website; the crawler doesn't seem to start from the site's start page.)

After a few images (~25-500), the crawler finishes and stops without any error; it simply stops. I have tried this on several websites and it stops after a few images every time. I think the crawler somehow skips step 3.

package main

import (
    "fmt"
    "io"
    "log"
    "net/http"
    "os"
    "strconv"
    "strings"

    "github.com/PuerkitoBio/goquery"
)

var (
    currWebsite  string = "https://www.youtube.com"
    imageCount   int    = 0
    crawlWebsite string
)

func processElement(index int, element *goquery.Selection) {
    href, exists := element.Attr("href")
    if exists && strings.HasPrefix(href, "http") {
        crawlWebsite = href
        response, err := http.Get(crawlWebsite)
        if err != nil {
            log.Fatalf("error on current website")
        }

        defer response.Body.Close()

        document, err := goquery.NewDocumentFromReader(response.Body)
        if err != nil {
            log.Fatal("Error loading HTTP response body.", err)
        }

        document.Find("img").Each(func(index int, element *goquery.Selection) {
            imgSrc, exists := element.Attr("src")
            if strings.HasPrefix(imgSrc, "http") && exists {
                fileName := fmt.Sprintf("./images/img" + strconv.Itoa(imageCount) + ".jpg")
                currWebsite := fmt.Sprint(imgSrc)
                fmt.Println("[+]", currWebsite)
                DownloadFile(fileName, currWebsite)
                imageCount++
            }
        })
    }
}

func main() {
    err := os.MkdirAll("./images/", 0777)
    if err != nil {
        log.Fatalln("error on creating directory")
    }

    response, err := http.Get(currWebsite)
    if err != nil {
        log.Fatalln("error on searching website")
    }

    defer response.Body.Close()

    document, err := goquery.NewDocumentFromReader(response.Body)
    if err != nil {
        log.Fatalln("Error loading HTTP response body. ", err)
    }

    document.Find("a").Each(processElement)
}

func DownloadFile(filepath string, url string) {
    response, err := http.Get(url)
    if err != nil {
        log.Fatalln("error getting the website infos")
    }
    defer response.Body.Close()

    if response.StatusCode != 200 {
        log.Fatalln("received non 200 response code")
    }

    file, err := os.Create(filepath)
    if err != nil {
        log.Fatalf("error creating file at %v\n", filepath)
    }

    defer file.Close()

    _, err = io.Copy(file, response.Body)
    if err != nil {
        log.Fatalln("error copy file from src to dst")
    }
}

1 answer:

Answer 0 (score: 1):

> (Even though I don't understand where the images I get come from, because I can't find them on the website; the crawler doesn't seem to start from the site's start page.)

Yes, you are right. Your code does not download any images from the start page, because the only thing it fetches from the start page is all the anchor tag elements; it then calls processElement() for every anchor element found on the start page -

response, err := http.Get(currWebsite)
if err != nil {
    log.Fatalln("error on searching website")
}

defer response.Body.Close()

document, err := goquery.NewDocumentFromReader(response.Body)
if err != nil {
    log.Fatalln("Error loading HTTP response body. ", err)
}

document.Find("a").Each(processElement) // Here

To download all images from the start page as well, you should define another function processUrl() that does the work of fetching the img elements and downloading the images, so that the processElement() function only needs to get the href link and call processUrl() on it -

func processElement(index int, element *goquery.Selection) {
    href, exists := element.Attr("href")
    if exists && strings.HasPrefix(href, "http") {
        crawlWebsite = href
        processUrl(crawlWebsite)
    }
}

func processUrl(crawlWebsite string) {
    response, err := http.Get(crawlWebsite)
    if err != nil {
        log.Fatalf("error on current website")
    }

    defer response.Body.Close()

    document, err := goquery.NewDocumentFromReader(response.Body)
    if err != nil {
        log.Fatal("Error loading HTTP response body.", err)
    }

    document.Find("img").Each(func(index int, element *goquery.Selection) {
        imgSrc, exists := element.Attr("src")
        if exists && strings.HasPrefix(imgSrc, "http") {
            fileName := fmt.Sprintf("./images/img%d.jpg", imageCount)
            currWebsite := fmt.Sprint(imgSrc)
            fmt.Println("[+]", currWebsite)
            DownloadFile(fileName, currWebsite)
            imageCount++
        }
    })
}

Now just crawl the images from the start page before processing all the links -

func main() {
    ...
    document, err := goquery.NewDocumentFromReader(response.Body)
    if err != nil {
        log.Fatalln("Error loading HTTP response body. ", err)
    }
    // First crawl images from start page url
    processUrl(currWebsite)
    document.Find("a").Each(processElement)
}
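
This fixes the missing start-page images, but the crawler will still stop after that first round of links: nothing ever follows the links found on the pages it crawls, so step 3 from your list is still missing. One way to get that behaviour is to replace the single Find("a").Each() pass with a queue and a visited set. The sketch below is only one possible structure, not a drop-in patch (collectLinks() and the maxPages limit are names introduced here, not from your code); it reuses your processUrl() and DownloadFile() unchanged -

func crawl(startURL string, maxPages int) {
    queue := []string{startURL}      // pages still to visit, in order
    visited := make(map[string]bool) // pages already processed

    for len(queue) > 0 && len(visited) < maxPages {
        page := queue[0]
        queue = queue[1:]
        if visited[page] {
            continue
        }
        visited[page] = true

        // Step 2: download every image found on this page.
        processUrl(page)

        // Steps 1 and 3: collect this page's links and enqueue them
        // so they get visited on later iterations.
        queue = append(queue, collectLinks(page)...)
    }
}

// collectLinks fetches one page and returns all absolute hrefs found on it.
func collectLinks(pageURL string) []string {
    var links []string

    response, err := http.Get(pageURL)
    if err != nil {
        log.Println("error fetching", pageURL, err)
        return nil
    }
    defer response.Body.Close()

    document, err := goquery.NewDocumentFromReader(response.Body)
    if err != nil {
        log.Println("error parsing", pageURL, err)
        return nil
    }

    document.Find("a").Each(func(index int, element *goquery.Selection) {
        if href, exists := element.Attr("href"); exists && strings.HasPrefix(href, "http") {
            links = append(links, href)
        }
    })
    return links
}

With this, main() shrinks to the directory creation plus a single crawl(currWebsite, 100) call, and the crawler keeps going until the queue is empty or the page limit is reached, instead of ending after the first page. For the same reason you would also want to switch the log.Fatal* calls in processUrl() and DownloadFile() to non-fatal logging, so that one bad link or non-200 response does not terminate the whole run.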