How to print the HTML source to the console with phantom-crawler

Time: 2016-03-27 14:10:13

Tags: javascript node.js web-crawler

I just downloaded and installed phantom-crawler for Node.js. I copied and pasted the following script into a file named crawler.js:

var Crawler = require('phantom-crawler');

// Can be initialized with optional options object 
var crawler = new Crawler();
// queue is an array of URLs to be crawled 
crawler.queue.push('https://google.com/');
// Can also do `crawler.fetch(url)` instead of pushing it and crawling it 
// Extract plainText out of each phantomjs page 
Promise.all(crawler.crawl())
.then(function(pages) {
  var texts = [];
  for (var i = 0; i < pages.length; i++) {
    var page = pages[i];
    // suffix Promise to return promises instead of callbacks 
    var text = page.getPromise('plainText');
    texts.push(text);
    text.then(function(p) {
      return function() {
        // Pages are like tabs, they should be closed 
        p.close()
      }
    }(page));
  }
  return Promise.all(texts);
})
.then(function(texts) {
  // texts = array of plaintext from the website bodies 
  // also supports ajax requests 
  console.log(texts);
})
.then(function () {
  // kill that phantomjs bridge 
  crawler.phantom.then(function (p) {
    p.exit();
  });
})

I want to print the full HTML source (in this case, of the Google page) to the console.

I have searched a lot but haven't found anything like this. How can I do it?

1 Answer:

Answer 0 (score: 1)

Get the content promise instead of the plainText one.

The phantom-crawler module uses the node-phantom-simple module, which in turn uses phantomjs.

You can find the list of page properties you can fetch in the phantomjs wiki.

var Crawler = require('phantom-crawler');

// Can be initialized with optional options object
var crawler = new Crawler();
// queue is an array of URLs to be crawled
crawler.queue.push('https://google.com/');
// Can also do `crawler.fetch(url)` instead of pushing it and crawling it
// Extract the full HTML content out of each phantomjs page
Promise.all(crawler.crawl())
.then(function(pages) {
  var allHtml = [];
  for (var i = 0; i < pages.length; i++) {
    var page = pages[i];
    // suffix Promise to return promises instead of callbacks
    var html = page.getPromise('content');
    allHtml.push(html);
    html.then(function(p) {
      return function() {
        // Pages are like tabs, they should be closed
        p.close()
      }
    }(page));
  }
  return Promise.all(allHtml);
})
.then(function(allHtml) {
  // allHtml = array of HTML source strings from the crawled pages
  // also supports ajax requests
  console.log(allHtml);
})
.then(function () {
  // kill that phantomjs bridge
  crawler.phantom.then(function (p) {
    p.exit();
  });
})
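As a follow-up, here is a minimal sketch of the same idea that writes each page's HTML to its own file instead of logging it. It only reuses the crawler calls already shown above plus Node's built-in fs module; the page-N.html file names are just illustrative, not part of the phantom-crawler API.

var Crawler = require('phantom-crawler');
var fs = require('fs');

var crawler = new Crawler();
// queue the URL(s) to crawl, exactly as above
crawler.queue.push('https://google.com/');

Promise.all(crawler.crawl())
.then(function(pages) {
  // Collect the full HTML source of every crawled page
  return Promise.all(pages.map(function(page) {
    return page.getPromise('content')
      .then(function(html) {
        // Pages are like tabs, close each one once its HTML is captured
        page.close();
        return html;
      });
  }));
})
.then(function(allHtml) {
  // Write each HTML document to its own file: page-0.html, page-1.html, ...
  allHtml.forEach(function(html, i) {
    fs.writeFileSync('page-' + i + '.html', html);
  });
})
.then(function() {
  // Shut down the phantomjs bridge when done
  return crawler.phantom.then(function(p) {
    p.exit();
  });
});

Running it with node crawler.js should leave one .html file per queued URL in the working directory, while the answer's version above prints the same HTML strings to the console.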