Jsoup Reddit Image Scraper: over-18 authentication page

Asked: 2015-11-16 15:27:13

Tags: java web-scraping jsoup reddit

I am working on an image scraper that uses Jsoup to scrape the first page of various subreddits. The problem is that when it tries to scrape an NSFW subreddit, Reddit redirects to an over-18 authentication page, and the scraper scrapes the authentication page instead. I am new to this and understand it is a noob question, but any help would be greatly appreciated because I am completely lost.

import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLConnection;
import java.util.Scanner;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

public class javascraper {
    // User-Agent value only; the header name is supplied separately when the request is made
    public static final String USER_AGENT = "github.com/dabeermasood:v1.2.3 (by /u/swedenotswiss)";

    public static void main(String[] args) throws MalformedURLException {
        Scanner scan = new Scanner(System.in);
        System.out.println("Where do you want to store the files?");
        String folderpath = scan.next();
        System.out.println("What subreddit do you want to scrape?");
        String subredditName = scan.next();
        String subreddit = "http://reddit.com/r/" + subredditName;

        // Create the target folder from the subreddit name, not the full URL
        new File(folderpath + "/" + subredditName).mkdir();


        try {
            // Fetch the subreddit front page
            Document doc = Jsoup.connect(subreddit).userAgent(USER_AGENT).timeout(0).get();

            // Get the page title
            String title = doc.title();
            System.out.println("title : " + title);

            // Get all links on the page
            Elements links = doc.select("a[href]");

            for (Element link : links) {
                // Get the value of the href attribute
                String checkLink = link.attr("href");
                if (imgCheck(checkLink)) { // checks whether the link points to an image
                    System.out.println("link : " + checkLink);
                    downloadImages(checkLink, folderpath);
                }
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }


    public static boolean imgCheck(String http) {
        // "jpeg" has no leading period so links ending in ".jpeg" still match
        return http.contains(".png") || http.contains("gfycat") || http.contains(".jpg")
                || http.contains("jpeg") || http.contains(".gif");
    }



    private static void downloadImages(String src, String folderpath) throws IOException {
        // Strip a trailing slash so a file name can be extracted
        if (src.endsWith("/")) {
            src = src.substring(0, src.length() - 1);
        }

        // Extract the image name (including the leading "/") from the src URL
        int indexname = src.lastIndexOf("/");
        String name = src.substring(indexname);
        System.out.println(name);

        // Open a URL stream
        URLConnection connection = (new URL(src)).openConnection();

        try {
            Thread.sleep(2000); // delay to comply with rate limiting
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        connection.setRequestProperty("User-Agent", USER_AGENT);

        InputStream in = connection.getInputStream();
        OutputStream out = new BufferedOutputStream(new FileOutputStream(folderpath + name));

        // Copy the image byte by byte
        for (int b; (b = in.read()) != -1;) {
            out.write(b);
        }

        out.close();
        in.close();
    }



}

1 Answer:

Answer 0 (score: 1)

I've posted an answer in this link on authenticating against a server using Jsoup. Basically you need to POST your login ID & password and whatever other data the form requires to the server, then save the response cookies from the server to keep the session authenticated.
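
A minimal sketch of that cookie pattern with Jsoup (not from the linked answer; the login URL and the form field names "user" and "passwd" are assumptions for illustration, and for Reddit's over-18 interstitial specifically an "over18=1" cookie alone may be enough):

    import java.io.IOException;
    import java.util.Map;

    import org.jsoup.Connection;
    import org.jsoup.Jsoup;
    import org.jsoup.nodes.Document;

    public class AuthExample {
        public static final String USER_AGENT = "github.com/dabeermasood:v1.2.3 (by /u/swedenotswiss)";

        public static void main(String[] args) throws IOException {
            // 1) POST the login form and capture the session cookies.
            // Endpoint and field names are hypothetical; check the actual login form.
            Connection.Response login = Jsoup.connect("https://www.reddit.com/post/login")
                    .userAgent(USER_AGENT)
                    .data("user", "yourUsername")
                    .data("passwd", "yourPassword")
                    .method(Connection.Method.POST)
                    .execute();
            Map<String, String> cookies = login.cookies();

            // 2) Replay those cookies on every later request so the session stays
            // authenticated; the over18 cookie may bypass the interstitial by itself.
            Document doc = Jsoup.connect("http://reddit.com/r/somesubreddit")
                    .userAgent(USER_AGENT)
                    .cookies(cookies)
                    .cookie("over18", "1")
                    .get();

            System.out.println(doc.title());
        }
    }

The portable part is the pattern itself: execute() the POST once, then pass res.cookies() into each subsequent Jsoup.connect(...) so the scraper is never redirected to the authentication page.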