Java web crawler to extract email addresses

Posted: 2018-10-22 14:42:18

Tags: java email web-crawler

I want to write a web crawler that starts at one page and then visits every link on that page looking for email addresses. This is what I have so far, but it doesn't do anything beyond moving from one web page to another.

package com.netinstructions.crawler;

import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;

public class WebCrawler {

    private static final int MAX_PAGES_TO_SEARCH = 26;
    private Set<String> pagesVisited = new HashSet<String>();
    private List<String> pagesToVisit = new LinkedList<String>();
    private List<String> emails = new LinkedList<>();

    private String nextUrl()
    {
        String nextUrl;
        do
        {
            nextUrl = this.pagesToVisit.remove(0);
        } while(this.pagesVisited.contains(nextUrl));
        this.pagesVisited.add(nextUrl);
        return nextUrl;
    }

    public void search(String url, String searchWord)
    {
        while(this.pagesVisited.size() < MAX_PAGES_TO_SEARCH)
        {
            String currentUrl;
            SpiderLeg leg = new SpiderLeg();
            if(this.pagesToVisit.isEmpty())
            {
                currentUrl = url;
                this.pagesVisited.add(url);
            }
            else
            {
                currentUrl = this.nextUrl();
            }
            leg.crawl(currentUrl); // Lots of stuff happening here. Look at the crawl method in
            // SpiderLeg
            leg.searchForWord(currentUrl, emails);
            this.pagesToVisit.addAll(leg.getLinks());
        }
        System.out.println(emails.toString());
        //System.out.println(String.format("**Done** Visited %s web page(s)", this.pagesVisited.size()));
    }
}

Here is my SpiderLeg class:

package com.netinstructions.crawler;

import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.jsoup.Connection;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

public class SpiderLeg
{
    // We'll use a fake USER_AGENT so the web server thinks the robot is a normal web browser.
    private static final String USER_AGENT =
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.112 Safari/535.1";
    private List<String> links = new LinkedList<String>();
    private Document htmlDocument;


    /**
     * This performs all the work. It makes an HTTP request, checks the response, and then gathers
     * up all the links on the page. Perform a searchForWord after the successful crawl.
     *
     * @param url
     *            - The URL to visit
     * @return whether or not the crawl was successful
     */
    public boolean crawl(String url)
    {
        try
        {
            Connection connection = Jsoup.connect(url).userAgent(USER_AGENT);
            Document htmlDocument = connection.get();
            this.htmlDocument = htmlDocument;
            if(connection.response().statusCode() == 200) // 200 is the HTTP OK status code
            // indicating that everything is great.
            {
                System.out.println("\n**Visiting** Received web page at " + url);
            }
            if(!connection.response().contentType().contains("text/html"))
            {
                System.out.println("**Failure** Retrieved something other than HTML");
                return false;
            }
            Elements linksOnPage = htmlDocument.select("a[href]");
            //System.out.println("Found (" + linksOnPage.size() + ") links");
            for(Element link : linksOnPage)
            {
                this.links.add(link.absUrl("href"));
            }
            return true;
        }
        catch(IOException ioe)
        {
            // We were not successful in our HTTP request
            return false;
        }
    }


    /**
     * Performs a search on the body of the HTML document that is retrieved. This method should
     * only be called after a successful crawl.
     *
     * @param searchWord
     *            - The word or string to look for
     */
    public void searchForWord(String searchWord, List<String> emails)
    {
        if(this.htmlDocument == null)
        {
            System.out.println("ERROR! Call crawl() before performing analysis on the document");
            //return false;
        }
        Pattern pattern =
                Pattern.compile("\"^[A-Z0-9._%+-]+@[A-Z0-9.-]+\\\\.[A-Z]{2,6}$\", Pattern.CASE_INSENSITIVE");

        Matcher matchs = pattern.matcher(searchWord);

        while (matchs.find()) {
            System.out.println(matchs.group());
        }
    }


    public List<String> getLinks()
    {
        return this.links;
    }

}

I took this web crawler from another source and changed a few things. I added a list to hold the emails and have them all returned to me. I don't think the way I'm putting the emails into the list is right, but I'm not sure how to fix it.

1 Answer:

Answer 0 (score: 1)

In the SpiderLeg class:

Pattern.compile("\"^[A-Z0-9._%+-]+@[A-Z0-9.-]+\\\\.[A-Z]{2,6}$\", Pattern.CASE_INSENSITIVE");

Shouldn't this be:

Pattern.compile("[A-Z0-9._%+-]+@[A-Z0-9.-]+\\.[A-Z]{2,6}", Pattern.CASE_INSENSITIVE);

Beyond the regex, nothing ever gets added to emails, so you need emails.add() to put the found addresses into the list (emails is declared as a List, which has no push() method). Second, you probably want to parse the HTML document rather than the page's URL. And since the method no longer returns anything, you need to extend the if statement into an if/else to avoid a NullPointerException. The searchForWord method should be:

public void searchForWord(String searchWord, List<String> emails)
{

    if(this.htmlDocument == null)
    {
        System.out.println("ERROR! Call crawl() before performing analysis on the document");
    } else
    {
        String input = this.htmlDocument.toString();

        Pattern pattern =
                Pattern.compile("[A-Z0-9._%+-]+@[A-Z0-9.-]+\\.[A-Z]{2,6}", Pattern.CASE_INSENSITIVE);

        Matcher matchs = pattern.matcher(input);

        while (matchs.find()) {
            emails.add(matchs.group());
        }
    }
}
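Note that searchWord is now effectively unused: the method scans the document that the SpiderLeg fetched, so the existing call leg.searchForWord(currentUrl, emails) keeps working unchanged. As a further optional tweak on the WebCrawler side (my suggestion, not strictly required): the same address often appears on several pages, so a List collects duplicates. A minimal sketch of deduplicating before printing:

// In WebCrawler.search(), before printing (requires importing java.util.LinkedHashSet):
// a LinkedHashSet drops duplicate addresses while keeping first-seen order.
Set<String> uniqueEmails = new LinkedHashSet<>(emails);
System.out.println(uniqueEmails);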