Fatal error: Call to a member function hasAttribute() on null

Asked: 2018-04-24 10:17:10

Tags: php web-crawler syntax-error

I modified a crawler I found on here and everything runs fine, but I have run into a couple of problems. The first is the fatal error: Call to a member function hasAttribute() on null. How can I get around it? The first record always comes out empty, and I don't know why either. The other thing is that my XML file gets overwritten after every record instead of having more elements appended to it.

Any help is greatly appreciated!

<?php
ini_set('display_errors', 1);
ini_set('display_startup_errors', 1);
error_reporting(E_ALL);
/*
 * howCode Web Crawler Tutorial Series Source Code
 * Copyright (C) 2016
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * https://howcode.org
 *
*/

// This is our starting point. Change this to whatever URL you want.
$start = "https://www.xxxxx.com/xxx.xml";

// Our 2 global arrays containing our links to be crawled.
$already_crawled = array();
$crawling = array();
function array_to_xml( $data, &$xml_data ) {
    foreach( $data as $key => $value ) {
        if( is_numeric($key) ){
            $key = 'item'.$key; //dealing with <0/>..<n/> issues
        }
        if( is_array($value) ) {
            $subnode = $xml_data->addChild($key);
            array_to_xml($value, $subnode);
        } else {
            $xml_data->addChild("$key",htmlspecialchars("$value"));
        }
     }
}
function get_details($url) {

    // The array that we pass to stream_context_create() to modify our User Agent.
    $options = array('http'=>array('method'=>"GET", 'headers'=>"User-Agent: howBot/0.1\n"));
    // Create the stream context.
    $context = stream_context_create($options);
    // Create a new instance of PHP's DOMDocument class.
    $doc = new DOMDocument();

    //var_dump($doc);
    // Use file_get_contents() to download the page, pass the output of file_get_contents()
    // to PHP's DOMDocument class.
    @$doc->loadHTML(@file_get_contents($url, false, $context));

    // Grab all of the <h1> tags on the page.
    $title = $doc->getElementsByTagName("h1");
    // There should only be one <h1> on each page, so we take the first item.
    $title = $title->item(0)->nodeValue;

    $data->title = $title;
    // Give $description and $keywords no value initially. We do this to prevent errors.
    $description = "";
    $keywords = "";
    // Use XPath to find the first element whose class contains "product-description".
    $proddesc="product-description";
    $finder = new DomXPath($doc);
    $spaners = $finder->query("//*[contains(@class, '$proddesc')]");
    $spaners = $spaners->item(0)->nodeValue;    
    $data->spanner = $spaners;

    $imgdata = "product-big-picture";
    $finder8 = new DomXPath($doc);
    $img = $finder8->query('//*[@class="product-big-picture"]');
    if ($img->item(0)->hasAttribute('src')){
        $img = $img->item(0)->getAttribute('src');
    }else{
        $img='';
    }
    $data->img = $img;

    var_dump($img);

    $proddescother ="other-desc";
    $finder2 = new DomXPath($doc);
    $descOther = $finder2->query("//*[contains(@class, '$proddescother')]");
    $descOther = $descOther->item(0)->nodeValue;
    $data->desc = $descOther;

    $eanhtml = "product-info";
    $finder3 = new DomXPath($doc);
    $ean = $finder3->query("//*[contains(@class, '$eanhtml')]");
    $ean = $ean->item(0)->nodeValue;

    //$ShortDesc    = $doc->getElementsByClassName("product-description");
    //var_dump($spaner);
    //$LongDesc     = $doc->getElementsByClassName("other-desc");
    //$ean      = $doc->getElementsByClassName("product-info");
    // Loop through all of the <meta> tags we find.
    /*for ($i = 0; $i < $metas->length; $i++) {
        $meta = $metas->item($i);
        // Get the description and the keywords.
        if (strtolower($meta->getAttribute("name")) == "description")
            $description = $meta->getAttribute("content");
        if (strtolower($meta->getAttribute("name")) == "keywords")
            $keywords = $meta->getAttribute("content");

    }*/
    // Split the product-info text on ":" to extract the EAN, then encode the record as JSON.
    $eanfinalPiece = explode(':', $ean);
    $eanfinal = $eanfinalPiece[1];
    $data->ean = $eanfinal;

    $product_data = json_encode($data);
    var_dump($product_data);


    //$json = '{ "Title": "'.str_replace("\n", "", $title).'", "Short Desc": "'.str_replace("\n", "", $spaners).'", "Long Desc": "'.str_replace("\n", "", $descOther).'", "EAN": "'.$eanfinal.'"},';
    $array = json_decode($product_data, true);
    //var_dump($json);
    $xml_data = new SimpleXMLElement('<?xml version="1.0"?><data></data>');
    array_to_xml($data,$xml_data);

    // Save the generated XML file.
    $result .= $xml_data->asXML('data.xml');
    return true;
}

function follow_links($url) {
    // Give our function access to our crawl arrays.
    global $already_crawled;
    //var_dump($already_crawled);
    global $crawling;
    // The array that we pass to stream_context_create() to modify our User Agent.
    $options = array('http'=>array('method'=>"GET", 'headers'=>"User-Agent: howBot/0.1\n"));
    // Create the stream context.
    $context = stream_context_create($options);
    // Create a new instance of PHP's DOMDocument class.
    $doc = new DOMDocument();
    // Use file_get_contents() to download the page, pass the output of file_get_contents()
    // to PHP's DOMDocument class.
    @$doc->loadHTML(@file_get_contents($url, false, $context));
    // Create an array of all of the links we find on the page.
    $linklist = $doc->getElementsByTagName("loc");
    //var_dump($linklist);
    // Loop through all of the links we find.
    foreach ($linklist as $link) {
        $l =  $link->textContent;
        //var_dump($link->textContent);
        // Process all of the links we find. This is covered in part 2 and part 3 of the video series.
        if (substr($l, 0, 1) == "/" && substr($l, 0, 2) != "//") {
            $l = parse_url($url)["scheme"]."://".parse_url($url)["host"].$l;
        } else if (substr($l, 0, 2) == "//") {
            $l = parse_url($url)["scheme"].":".$l;
        } else if (substr($l, 0, 2) == "./") {
            $l = parse_url($url)["scheme"]."://".parse_url($url)["host"].dirname(parse_url($url)["path"]).substr($l, 1);
        } else if (substr($l, 0, 1) == "#") {
            $l = parse_url($url)["scheme"]."://".parse_url($url)["host"].parse_url($url)["path"].$l;
        } else if (substr($l, 0, 3) == "../") {
            $l = parse_url($url)["scheme"]."://".parse_url($url)["host"]."/".$l;
        } else if (substr($l, 0, 11) == "javascript:") {
            continue;
        } else if (substr($l, 0, 5) != "https" && substr($l, 0, 4) != "http") {
            $l = parse_url($url)["scheme"]."://".parse_url($url)["host"]."/".$l;
        }
        // If the link isn't already in our crawl array add it, otherwise ignore it.
        if (!in_array($l, $already_crawled)) {
                $already_crawled[] = $l;
                $crawling[] = $l;
                // Crawl the page and write its product record to data.xml.
                echo get_details($l)."\n";
        }

    }
    // Remove an item from the array after we have crawled it.
    // This prevents infinitely crawling the same page.
    array_shift($crawling);
    // Follow each link in the crawling array.
    foreach ($crawling as $site) {
        follow_links($site);
    }

}
// Begin the crawling process by crawling the starting link first.
follow_links($start);

1 Answer:

Answer 0 (score: 0)

Try replacing

if ($img->item(0)->hasAttribute('src')){

with

if ($img->item(0) && $img->item(0)->hasAttribute('src')){
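
DOMXPath::query() returns a DOMNodeList, and when nothing on the page matches the query the list is empty, so item(0) returns null; calling hasAttribute() on that null is your fatal error. The && short-circuits, so hasAttribute() only runs when a node actually exists. This is most likely also why the first record always comes out empty: your start URL is the sitemap XML itself, not a product page, so none of the product selectors match anything on it.

Every other item(0) call in get_details() ($title, $spaners, $descOther, $ean) can fail the same way. Here is a minimal sketch of the same guard applied everywhere (the helper name is my own, not part of the tutorial code):

// Return the text of the first node matched by an XPath query,
// or an empty string when the page has no such node.
function first_node_value(DOMXPath $finder, $query) {
    $nodes = $finder->query($query);
    return ($nodes !== false && $nodes->item(0) !== null)
        ? $nodes->item(0)->nodeValue
        : '';
}

// For example, inside get_details():
// $spaners = first_node_value($finder, "//*[contains(@class, '$proddesc')]");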
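
As for the XML being overwritten: get_details() creates a brand-new SimpleXMLElement and calls asXML('data.xml') on every page, so the file is rewritten with a single record each time. One way to accumulate records instead is to load the existing file when it is there and append each product as its own child. A sketch, assuming data.xml sits next to the script and is writable:

// Load the existing document, or start a fresh one on the first run.
if (file_exists('data.xml')) {
    $xml_data = simplexml_load_file('data.xml');
} else {
    $xml_data = new SimpleXMLElement('<?xml version="1.0"?><data></data>');
}
// Append this page's record as its own <product> element.
$product = $xml_data->addChild('product');
array_to_xml($array, $product); // $array = json_decode($product_data, true)
$xml_data->asXML('data.xml');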