如何使用Puppeteer抓取Reddit页面?

时间:2019-03-31 07:40:26

标签: javascript node.js dom web-scraping puppeteer

我正在尝试学习使用Puppeteer抓取Reddit页面。新的reddit具有动态添加的内容和无限滚动的功能。我从代码中获得了非常不一致的结果,并且在调试和弄清楚如何使此工作变得困难时。

server.js主文件,这里没有太多内容。

'use strict';

const express = require('express');
const cors = require('cors');
const app = express();
const cheerio = require('./redditScraper');

app.use(express.json());
app.use(
    cors({
        origin: ['http://localhost:3000']
    })
);

app.get('/', (req, res) => {
    let { dynamicScraper } = cheerio;

    dynamicScraper()
        .then(html => {
            console.log('data was sent');
            res.json(html);
        })
        .catch(err => {
            console.log(err);
        });
});

app.listen(process.env.PORT || 8080, () => {
    console.log(`Your app is listening on port ${process.env.PORT || 8080}`);
});

包含刮板材料的文件

'use strict';

const rp = require('request-promise');
const $ = require('cheerio');
const puppeteer = require('puppeteer');
const url2 = 'https://www.reddit.com/r/GameDeals/';



const cheerio = {
    dynamicScraper: function() {
       return puppeteer
            .launch()
            .then(browser => {
                return browser.newPage();
            })
            .then(page => {
                return page.goto(url2)
                    .then(() => {
                        //attempting to scroll through page to cause loading in more elements, doesn't seem to work
                        page.evaluate(() => {
                            window.scrollBy(0, window.innerHeight);
                        })
                        return page.content()
                    });
            })
            .then(html => {
                //should log the the first post's a tag's href value
                console.log($('.scrollerItem div:nth-of-type(2) article div div:nth-of-type(3) a', html).attr('href'));

                const urls = [];

                //should be the total number of a tag's across all posts
                const numLinks = $('.scrollerItem div:nth-of-type(2) article div div:nth-of-type(3) a', html).attr('href').length;

                const links = $('.scrollerItem div:nth-of-type(2) article div div:nth-of-type(3) a', html);

                //was using numLinks as increment counter but was getting undefined, as the pages only seems to load inconsistent between 10-20 elements
                for (let i=0; i<10; i++) {
                    urls.push(links[i].attribs.href);
                }

                console.log('start of urls:', urls);
                console.log('scraped urls:', urls.length);
                console.log('intended number of urls to be scraped:', numLinks);
                // console.log($('.scrollerItem div:nth-of-type(2) article div div:nth-of-type(3) a', html).length);
            })
            .catch(err => console.log(err));
    }

}

module.exports = cheerio;

上面的代码当前有效,但是从注释中可以看到,我将计数器硬编码为10,这显然不是页面上<a href=#>的总数。

以下是上面的输出:

[nodemon] starting `node server.js`
Your app is listening on port 8080
https://www.gamebillet.com/garfield-kart
start of urls: [ 'https://www.gamebillet.com/garfield-kart',
  'https://www.humblebundle.com/store/deep-rock-galactic?hmb_source=humble_home&hmb_medium=product_tile&hmb_campaign=mosaic_section_1_layout_index_9_layout_type_twos_tile_index_1',  'https://www.greenmangaming.com/games/batman-arkham-asylum-game-of-the-year/',
  'https://www.humblebundle.com/store/ftl-faster-than-light',
  'https://www.greenmangaming.com/vip/vip-deals/',
  'https://store.steampowered.com/app/320300/',
  'https://store.steampowered.com/app/356650/Deaths_Gambit/',
  'https://www.chrono.gg/?=Turmoil',
  'https://www.fanatical.com/en/game/slain',
  'https://freebies.indiegala.com/super-destronaut/?dev_id=freebies' ]
scraped urls: 10
numLinks: 40
data was sent

这是当for循环更改为numlinks

时的输出
for (let i=0; i<numLinks; i++) {
    urls.push(links[i].attribs.href);
}
[nodemon] starting `node server.js`
Your app is listening on port 8080
https://www.gamebillet.com/garfield-kart
TypeError: Cannot read property 'attribs' of undefined
    at puppeteer.launch.then.then.then.html (/file.js:49:40)
    at process._tickCallback (internal/process/next_tick.js:68:7)
data was sent

我希望阅读的内容不会太混乱。我将不胜感激任何帮助。谢谢。

更新/编辑:

我正在尝试实现异步方式,但是我不确定如何返回要在路由处理程序中使用的值?

    dynamicScraper: function() {
        async function f() {
            const browser = await puppeteer.launch();
            const [page] = await browser.pages();

            await page.goto(url2, { waitUntil: 'networkidle0' });
            const links = await page.evaluate(async () => {
                const scrollfar = document.body.clientHeight;
                console.log(scrollfar); //trying to find the height
                window.scrollBy(0, scrollfar);
                await new Promise(resolve => setTimeout(resolve, 5000)); 
                return [...document.querySelectorAll('.scrollerItem div:nth-of-type(2) article div div:nth-of-type(3) a')]
                    .map((el) => el.href);
            });
            console.log(links, links.length);

            await browser.close();
            //how do I return the value to pass to the route handler?
            return (links);
        };
        return(f());
    }

我正在从console.log回来。

Your app is listening on port 8080
[ 'https://www.nintendo.com/games/detail/celeste-switch',
  'https://store.steampowered.com/app/460790/Bayonetta/',
  'https://www.greenmangaming.com/de/vip/vip-deals/']

但是服务器到客户端的响应是浏览器上的空对象 {}

更新/编辑2:

没关系,认为它是处理异步功能的承诺所必需的。

dynamicScraper().then((output) => {
        res.json(output);
    });

1 个答案:

答案 0 :(得分:1)

您的代码有几个问题:

page.goto(url2)

默认情况下,page.goto仅等待load事件。将其更改为page.goto(url2, { waitUntil: 'networkidle0' })将等待所有请求完成。

page.evaluate(() => {
    window.scrollBy(0, window.innerHeight);
})

您在该语句之前缺少await(或者您需要将其嵌入到您的Promise流中)。此外,您不会滚动到页面的末尾,而只会滚动到窗口的高度。您应该使用document.body.clientHeight滚动到页面的末尾。

此外,滚动后还必须等待一段时间(或期望的选择器)。您可以使用此代码等待一秒钟,以便页面有足够的时间加载更多内容:

new Promise(resolve => setTimeout(resolve, 5000))

关于您的一般想法,我建议仅使用puppeteer而不是使用puppeteer进行导航,然后提取所有数据并将其放入cheerio。如果仅使用puppeteer,则您的代码可能会像这样简单(您仍然需要将其包装到函数中):

const puppeteer = require('puppeteer');

(async () => {
    const browser = await puppeteer.launch();
    const [page] = await browser.pages();

    await page.goto('https://www.reddit.com/r/GameDeals/', { waitUntil: 'networkidle0' });
    const links = await page.evaluate(async () => {
        window.scrollBy(0, document.body.clientHeight);
        await new Promise(resolve => setTimeout(resolve, 5000)); // wait for some time, you might need to figure out a good value for this yourself
        return [...document.querySelectorAll('.scrollerItem div:nth-of-type(2) article div div:nth-of-type(3) a')]
            .map((el) => el.href);
    });
    console.log(links, links.length);

    await browser.close();
})();