在此tutorial之后,我正在使用cheerio.js和puppeteer.js尝试进行一些基本的Web抓取。我并没有完全按照教程进行操作,而是试图在服务器端编写它,以使后端处理所有抓取操作,然后在将来将数据传递给前端。
现在写的是
[nodemon] restarting due to changes...
[nodemon] starting node server.js
Your app is listening on port 8080
[Function]
当我希望它像本教程中的html一样时,好像dynamicScraper
返回了[Function]
吗?
server.js主文件
'use strict';
const express = require('express');
const cors = require('cors');
const app = express();
const cheerio = require('./potusScraper');
app.use(express.json());
app.use(
cors({
origin: ['http://localhost:3000']
})
);
app.get('/', (req, res) => {
let { scraper, dynamicScraper } = cheerio;
//dynamicScraper should return html as a string?
dynamicScraper()
.then(html => {
res.send(html);
})
.catch(err => {
console.log(err);
});
});
app.listen(process.env.PORT || 8080, () => {
console.log(`Your app is listening on port ${process.env.PORT || 8080}`);
});
potusScraper.js文件
'use strict';
const rp = require('request-promise');
const $ = require('cheerio');
const puppeteer = require('puppeteer');
const url = 'https://en.wikipedia.org/wiki/List_of_Presidents_of_the_United_States';
const url2 = 'https://www.reddit.com';
const cheerio = {
scraper: function() {
return rp(url)
.then(html => {
const wikiUrls=[];
for (let i = 0; i < 45; i++) {
wikiUrls.push($('big > a', html)[i].attribs.href);
}
return(wikiUrls);
})
.catch(err => console.log(err))
},
dynamicScraper: function() {
return puppeteer //doesn't look like this works?
.launch()
.then(browser => {
return browser.newPage();
})
.then(page => {
return page.goto(url2)
.then(() => {return page.content});
})
.then(html => {
console.log(html);
return(html);
})
.catch(err => console.log(err));
}
}
module.exports = cheerio;
答案 0 :(得分:1)
您将在此代码行中返回page.content
函数,而不是调用它:
.then(page => {
return page.goto(url2)
.then(() => {return page.content});
})
第三行应如下所示:
.then(() => {return page.content()});
此外,您可以使用简洁的箭头功能来简化代码:
.then(() => page.content());