如何使用抓取工具抓取网站的所有内部网址?

时间:2018-05-03 11:36:26

标签: node.js web-crawler

我想在 node.js 中使用抓取工具抓取网站中的所有链接(内部链接)并获取每个页面的标题。我在 npm 上看到了 crawler 这个插件,查看其文档后发现有以下示例:

// Minimal node-crawler example: fetch one page and log its <title>.
const Crawler = require("crawler");

const crawler = new Crawler({
    maxConnections: 10,
    // Runs once for every page that gets crawled.
    callback(error, res, done) {
        if (!error) {
            // res.$ is a Cheerio instance by default — a lean server-side
            // implementation of core jQuery.
            const $ = res.$;
            console.log($("title").text());
        } else {
            console.log(error);
        }
        done();
    },
});

// Queue a single URL, handled by the default callback above.
crawler.queue('http://balenol.com');

但我真正想要的是抓取网站中的所有内部网址——这个功能是插件内置的,还是需要我自己单独编写?我在插件文档中没有看到任何可以遍历网站所有链接的选项,这可行吗?

2 个答案:

答案 0 :(得分:4)

以下代码段会抓取找到的每个网址中的所有网址。

const Crawler = require("crawler");

// URLs that have already been queued, so each one is crawled at most once.
// (Renamed from the original's misspelled "obselete".)
const visited = [];

const c = new Crawler();

/**
 * Recursively crawls `url` and every link (<a href>) found on the fetched page.
 * Absolute hrefs (starting with "http") are followed as-is; other hrefs are
 * naively appended to the current `url`.
 * @param {string} url - Absolute URL to fetch.
 */
function crawlAllUrls(url) {
    console.log(`Crawling ${url}`);
    c.queue({
        uri: url,
        callback: function (err, res, done) {
            if (err) {
                // Never throw from inside the crawler callback — the original
                // `throw err` skipped done() and wedged the queue. Report the
                // failure and release the connection slot instead.
                console.error(`Request failed for ${url}:`, err);
                done();
                return;
            }
            const $ = res.$; // Cheerio instance provided by node-crawler
            try {
                const anchors = $("a");
                Object.keys(anchors).forEach((item) => {
                    if (anchors[item].type === 'tag') {
                        let href = anchors[item].attribs.href;
                        if (href) {
                            // Trim BEFORE the dedupe check: the original trimmed
                            // after `includes`, so "x" and "x " were crawled twice.
                            href = href.trim();
                            if (!visited.includes(href)) {
                                visited.push(href);
                                // Throttle recursion so the target site isn't hammered.
                                setTimeout(function () {
                                    // Relative links are prefixed with the current URL.
                                    // NOTE(review): this naive join assumes `url` has no
                                    // trailing path segment — a real URL resolver
                                    // (e.g. new URL(href, url)) would be safer.
                                    href.startsWith('http') ? crawlAllUrls(href) : crawlAllUrls(`${url}${href}`);
                                }, 5000);
                            }
                        }
                    }
                });
            } catch (e) {
                console.error(`Encountered an error crawling ${url}. Aborting crawl.`);
            }
            // Signal completion exactly once — the original called done() twice
            // on the catch path (inside catch AND after the try/catch).
            done();
        }
    });
}

crawlAllUrls('https://github.com/evyatarmeged/');

答案 1 :(得分:0)

在上面的代码中,只需更改以下内容即可获取网站的内部链接...

将

href.startsWith('http') ? crawlAllUrls(href) : crawlAllUrls(`${url}${href}`)

改为

href.startsWith(url) ? crawlAllUrls(href) : crawlAllUrls(`${url}${href}`)