How can I rewrite this with promises?

Asked: 2016-09-22 07:02:54

Tags: javascript node.js web-scraping promise

I'm building a content scraper for a t-shirt website.

The goal is to enter the site through just one hardcoded URL: http://shirts4mike.com

I'll then find all the product pages for each t-shirt, create an object with that shirt's details, and add it to an array.

Once the array is full of t-shirts, I'll work through the array and log each one to a CSV file.

Right now I'm having some trouble with the timing of the requests/responses and the function calls.

How can I make sure I call the NEXT function at the right time? I understand it isn't working because of its asynchronous nature.

How can I call secondScrape, lastScraper, and convertJson2Csv at the right time so that the variables they're working with are not undefined?

I tried using something like response.end(), but that isn't working.

I'm assuming I need to use promises to make this work properly, and to keep it legible?

Any ideas? My code is below:

//Modules being used:
var cheerio = require('cheerio');
var request = require('request');
var moment = require('moment');

//hardcoded url
var url = 'http://shirts4mike.com/';

//url for tshirt pages
var urlSet = new Set();

var remainder;
var tshirtArray;


// Load front page of shirts4mike
request(url, function(error, response, html) {
    if(!error && response.statusCode == 200){
        var $ = cheerio.load(html);

    //iterate over links with 'shirt'
        $("a[href*=shirt]").each(function(){
            var a = $(this).attr('href');

            //create new link
            var scrapeLink = url + a;

            //for each new link, go in and find out if there is a submit button. 
            //If there, add it to the set
            request(scrapeLink, function(error,response, html){
                if(!error && response.statusCode == 200) {
                    var $ = cheerio.load(html);

                    //if page has a submit it must be a product page
                    if($('[type=submit]').length !== 0){

                        //add page to set
                        urlSet.add(scrapeLink);

                    } else if(remainder === undefined) {
                        //if not a product page, add it to remainder so another scrape can be performed.
                        remainder = scrapeLink;                     
                    }
                }
            });
        });     
    }
    //call second scrape for remainder
    secondScrape();
});


function secondScrape() {
    request(remainder, function(error, response, html) {
        if(!error && response.statusCode == 200){
            var $ = cheerio.load(html);

            $("a[href*=shirt]").each(function(){
                var a = $(this).attr('href');

                //create new link
                var scrapeLink = url + a;

                request(scrapeLink, function(error,response, html){
                    if(!error && response.statusCode == 200){

                        var $ = cheerio.load(html);

                        //collect remaining product pages and add to set
                        if($('[type=submit]').length !== 0){
                            urlSet.add(scrapeLink);
                        }
                    }
                });
            });     
        }
    });
    console.log(urlSet);
    //call lastScraper so we can grab data from the set (product pages)
    lastScraper();
};



function lastScraper(){
    //scrape set, product pages
    for(var i = 0; i < urlSet.length; i++){
        var url = urlSet[i];

        request(url, function(error, response, html){
            if(!error && response.statusCode == 200){
                var $ = cheerio.load(html);

                //grab data and store as variables
                var price = $('.price').text();
                var img = $('.shirt-picture').find("img").attr("src");
                var title = $('body').find(".shirt-details > h1").text().slice(4);

                var tshirtObject = {};
                //add values into tshirt object

                tshirtObject.price = price;
                tshirtObject.img = img;
                tshirtObject.title = title;
                tshirtObject.url = url;
                tshirtObject.date = moment().format('MMMM Do YYYY, h:mm:ss a');

                //add the object into the array of tshirts
                tshirtArray.push(tshirtObject); 
            }
        });
    }
    //call function to iterate through tshirt objects in array in order to convert to JSON, then into CSV to be logged
    convertJson2Csv();
};

4 Answers:

Answer 0 (score: 0)

There's an npm module called request-promise.

Simply:

var rp = require("request-promise");

Anywhere you make a request, you can swap in request-promise.

For example:

rp(url)
.then(function(value){
  //do whatever
})
.catch(function(err){
  console.log(err)
})
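
Applied to your scraper, a rough sketch of the same idea might look like this (collectProductUrls is just an illustrative name, and the selectors are taken from your code; request-promise resolves with the response body by default):

var rp = require('request-promise');
var cheerio = require('cheerio');

var url = 'http://shirts4mike.com/';

// Fetch the front page, follow every 'shirt' link, and resolve
// with only the links that turn out to be product pages.
function collectProductUrls() {
    return rp(url).then(function(html) {
        var $ = cheerio.load(html);
        var links = $("a[href*=shirt]").map(function() {
            return url + $(this).attr('href');
        }).get();

        // fetch every candidate link in parallel
        return Promise.all(links.map(function(link) {
            return rp(link).then(function(pageHtml) {
                var $page = cheerio.load(pageHtml);
                // a submit button marks a product page
                return $page('[type=submit]').length !== 0 ? link : null;
            });
        }));
    }).then(function(results) {
        return results.filter(Boolean);
    });
}

collectProductUrls()
    .then(function(productUrls) {
        console.log(productUrls); // ready for the detail scrape and CSV step
    })
    .catch(function(err) {
        console.log(err);
    });

Because each rp() call returns a promise, the nested callbacks become a flat chain, and Promise.all gives you a single point where all sub-requests are known to be done.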

Answer 1 (score: 0)

  

You can use the waterfall method of the async module; it lets you solve this problem smoothly.

I just tried implementing your code with this module.

Hope this works for you.

Waterfall format:

async.waterfall([
  function(callback) {
    callback(null, previousvalue);
  },
  function(previousvalue, callback) {}
], function(err, result) { //Final callback

});
var async = require('async');
var cheerio = require('cheerio');
var request = require('request');
var moment = require('moment');

//hardcoded url
var url = 'http://shirts4mike.com/';

//url for tshirt pages
var urlSet = new Set();

var remainder;
var tshirtArray = [];


async.waterfall([
  function(callback) {
    // Load front page of shirts4mike
    request(url, function(error, response, html) {
      if (!error && response.statusCode == 200) {
        var $ = cheerio.load(html);

        //iterate over links with 'shirt'
        $("a[href*=shirt]").each(function() {
          var a = $(this).attr('href');

          //create new link
          var scrapeLink = url + a;

          //for each new link, go in and find out if there is a submit button. 
          //If there, add it to the set
          request(scrapeLink, function(error, response, html) {
            if (!error && response.statusCode == 200) {
              var $ = cheerio.load(html);

              //if page has a submit it must be a product page
              if ($('[type=submit]').length !== 0) {

                //add page to set
                urlSet.add(scrapeLink);
                callback(null, true);

              } else if (remainder === undefined) {
                //if not a product page, add it to remainder so another scrape can be performed.
                remainder = scrapeLink;
                callback(null, true);
              }
            }
          });
        });
      }
      //call second scrape for remainder
      // secondScrape();
    });
  },
  function(previousvalue, callback) {
    request(remainder, function(error, response, html) {
      if (!error && response.statusCode == 200) {
        var $ = cheerio.load(html);

        $("a[href*=shirt]").each(function() {
          var a = $(this).attr('href');

          //create new link
          var scrapeLink = url + a;

          request(scrapeLink, function(error, response, html) {
            if (!error && response.statusCode == 200) {

              var $ = cheerio.load(html);

              //collect remaining product pages and add to set
              if ($('[type=submit]').length !== 0) {
                urlSet.add(scrapeLink);
              }
              callback(null, true);
            }
          });
        });
      }
    });
    console.log(urlSet);
    //call lastScraper so we can grab data from the set (product pages)
  },
  function(previousvalue, callback) {
    //scrape set, product pages
    for (var i = 0; i < urlSet.length; i++) {
      var url = urlSet[i];

      request(url, function(error, response, html) {
        if (!error && response.statusCode == 200) {
          var $ = cheerio.load(html);

          //grab data and store as variables
          var price = $('.price').text();
          var img = $('.shirt-picture').find("img").attr("src");
          var title = $('body').find(".shirt-details > h1").text().slice(4);

          var tshirtObject = {};
          //add values into tshirt object

          tshirtObject.price = price;
          tshirtObject.img = img;
          tshirtObject.title = title;
          tshirtObject.url = url;
          tshirtObject.date = moment().format('MMMM Do YYYY, h:mm:ss a');

          //add the object into the array of tshirts
          tshirtArray.push(tshirtObject);
        }
      });
    }
  }
], function(err, result) {
  //call function to iterate through tshirt objects in array in order to convert to JSON, then into CSV to be logged
  convertJson2Csv();
});

Answer 2 (score: 0)

You can use this example to convert the rest of your code sample.

var promise = new Promise((resolve, reject) => ( 
    request("http://shirts4mike.com/", 
    (err, response, html) => (!err && response.statusCode == 200 ? resolve(html) : reject(err))
)));


promise.then(html => {
    var $ = cheerio.load(html);
    // continue
});
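
A small follow-up sketch: wrapping that pattern in a helper (requestPromise is just an illustrative name) lets the remaining requests be chained instead of nested:

var request = require('request');
var cheerio = require('cheerio');

// Illustrative helper: the same Promise wrapper, reusable for any URL.
function requestPromise(targetUrl) {
    return new Promise((resolve, reject) => (
        request(targetUrl, (err, response, html) =>
            (!err && response.statusCode == 200 ? resolve(html) : reject(err)))
    ));
}

requestPromise("http://shirts4mike.com/")
    .then(html => {
        var $ = cheerio.load(html);
        // collect candidate links here, then fetch them all in parallel, e.g.:
        // return Promise.all(links.map(requestPromise));
    })
    .catch(err => console.log(err));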

Answer 3 (score: 0)

You've correctly identified promises as the way forward to solve your timing issues.

To have promises available, you need to promisify request (or adopt an HTTP lib whose methods return promises).

You could fix the timing issues with promises alone, but you could also take the opportunity to improve the overall paradigm. Instead of discrete functions for the near-identical first/second/third stages, you can write one function that calls itself recursively. Written correctly, this will ensure that each page in the target site is visited at most once; revisits should be avoided both for overall performance and to ease the load on the target server.

//Modules being used:
var Promise = require('path/to/bluebird');
var cheerio = require('cheerio');
var moment = require('moment');

// Promisify `request` to make `request.getAsync()` available.
// Ref: http://stackoverflow.com/questions/28308131/how-do-you-properly-promisify-request
var request = Promise.promisify(require('request'));
Promise.promisifyAll(request);

//hardcoded url
var url = 'http://shirts4mike.com/';

var urlSet = new Set();
var tshirtArray = [];

var maxLevels = 3; // limit the recursion to this number of levels.

function scrapePage(url_, levelCounter) {
    // Bale out if :
    //   a) the target url_ has been visited already,
    //   b) maxLevels has been reached.
    if(urlSet.has(url_) || levelCounter >= maxLevels) {
        return Promise.resolve();
    }
    urlSet.add(url_);

    return request.getAsync(url_).then(function(response) {
        // Bluebird's promisify passes only the first callback value,
        // so the page html is read from response.body.
        var $;
        if(response.statusCode !== 200) {
            throw new Error('statusCode was not 200'); // will be caught below
        }
        $ = cheerio.load(response.body);
        if($('[type=submit]').length > 0) {
            // yay, it's a product page.
            tshirtArray.push({
                price: $('.price').text(),
                img: $('.shirt-picture').find("img").attr("src"),
                title: $('body').find(".shirt-details > h1").text().slice(4),
                url: url_,
                date: moment().format('MMMM Do YYYY, h:mm:ss a')
            });
        }
        // find any shirt links on page represented by $, visit each link in turn, and scrape.
        // (cheerio's .map callback receives (index, element), and the hrefs are relative)
        return Promise.all($("a[href*=shirt]").map(function(i, el) {
            return scrapePage(url + $(el).attr('href'), levelCounter + 1);
        }).get());
    }).catch(function(e) {
        // ensure "success" even if scraping threw an error.
        console.log(e);
        return null;
    });
}

scrapePage(url, 0).then(convertJson2Csv);

As you can see, the recursive solution:

  • avoids repeated code,
  • will drill down as many levels as you wish, as determined by the variable maxLevels.

Note: this is still not a good solution. There's an implicit assumption here, as in the original code, that all shirt pages are reachable from the site's home page via "shirt" links alone. If shirts were reachable via, say, "clothing" > "shirts", then the code above wouldn't find any shirts.
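
For completeness, convertJson2Csv is referenced but never shown. A minimal sketch using only Node's built-in fs module (the field list and the output filename are assumptions) could be:

var fs = require('fs');

// Minimal sketch: write tshirtArray out as a CSV file using core Node only.
function convertJson2Csv() {
    var fields = ['title', 'price', 'img', 'url', 'date'];
    var rows = tshirtArray.map(function(shirt) {
        return fields.map(function(field) {
            // quote each value and escape any embedded quotes
            return '"' + String(shirt[field]).replace(/"/g, '""') + '"';
        }).join(',');
    });
    fs.writeFileSync('shirts.csv', fields.join(',') + '\n' + rows.join('\n') + '\n');
}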