Apache HttpClient can't get page content using pagination links. I am getting a status of 200, but the HTML has no content

Date: 2020-02-08 08:04:23

Tags: java httpclient apache-httpclient-4.x apache-httpcomponents

I am trying to web-crawl content pages using Apache HttpClient. When I request the next page through a pagination link, the status is 200 but the HTML body shows a 500 and has no content. Postman works fine with the very same pagination links and gets the content.

Main class

public static void main(String[] args) {
        String url = "https://www.cochranelibrary.com/cdsr/reviews/topics";
        MyContentFetcher myContentFetcher = new MyContentFetcher();
        MyParser myParser = new MyParser();
        try {
            // Load Topic list page
            String topicsPage = myContentFetcher.loadHTML(url);

            // Getting all the topics.
            Map<Integer, MyNode> topics = myParser.getTopicList(topicsPage);

            // Print all the topics and ask user to choose one
            for (int id : topics.keySet())
                System.out.println("-> " + id + " <- " + topics.get(id).getTopic());
            System.out.println("********************");
            System.out.print("Enter ID number from the list above to get reviews or enter anything else to exit:\n");
            BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));
            String id = reader.readLine();

            // Validate user input, get the link and topic, and print the choice.
            if (isNumber(id)) {
                int idNum = Integer.parseInt(id);
                if (idNum <= topics.size() && idNum > 0) {
                    String topic = topics.get(idNum).getTopic();
                    String link = topics.get(idNum).getLink();
                    System.out.println("You picked: " + topic + link + "\n***************************");
                    // Loading first page of reviews
                    myParser.loadReviews(myContentFetcher.loadHTML(link), topic);
                    // Getting links to other pages
                    Queue<String> paginationLinks = myParser.getLinks();

                    // --------------> WORKS FINE UNTIL HERE <--------------
                    // Problem starts here....
                    // Load list of reviews for chosen topic
                    while(!paginationLinks.isEmpty()) {
                        String page = myContentFetcher.loadHTML(paginationLinks.remove());
                        myParser.loadReviews(page, topic);
                    }
                }
            }
            System.out.println("Exiting...");

        } catch (IOException e) {
            System.out.println("There was a problem...");
        }
    }

    // isNumber is referenced above but was not shown in the post; a minimal version:
    private static boolean isNumber(String s) {
        return s != null && s.matches("\\d+");
    }

!!!! This is the class that fetches the HTML. I might be doing something wrong here...

import org.apache.http.client.config.CookieSpecs;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;

import java.io.IOException;

import java.util.Scanner;

public class MyContentFetcher {

    public MyContentFetcher() {
    }

    String loadHTML(String url) throws IOException {
        // Request configuration: allow circular redirects and use the standard cookie spec
        RequestConfig config = RequestConfig.custom()
                .setCircularRedirectsAllowed(true)
                .setCookieSpec(CookieSpecs.STANDARD)
                .build();
        // Creating a HttpClient object
        CloseableHttpClient httpClient = HttpClients.custom()
                .setDefaultRequestConfig(config)
                .build();
        // Creating a HttpGet object
        HttpGet httpget = new HttpGet(url);
        httpget.setHeader("User-Agent", "Mozilla/5.0 (Linux; Android 8.1.0; Pixel Build/OPM4.171019.021.D1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.109 Mobile Safari/537.36 EdgA/42.0.0.2057");
        CloseableHttpResponse httpResponse = httpClient.execute(httpget);
        Scanner sc = new Scanner(httpResponse.getEntity().getContent());
        StringBuilder page = new StringBuilder();
        // hasNextLine() matches nextLine(); hasNext() looks for tokens and can stop early
        while (sc.hasNextLine())
            page.append(sc.nextLine()).append(" ");
        sc.close();
        httpResponse.close();
        httpClient.close();
        return page.toString();
    }
}

Here is the parser. The parser itself is fine (it parses everything just as needed).

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

import java.io.IOException;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.*;

public class MyParser {

    private Map<String, String> topics;
    private Document htmlPage;
    private Element reviewBlock;

    public MyParser(){}

    // Loads all topics from the Cochrane Library into a map -> (Topic Name, Link)
    public Map<Integer, MyNode> getTopicList(String page) {
        Map<Integer, MyNode> topics = new HashMap<Integer, MyNode>();
        htmlPage = Jsoup.parse(page);
        // Get 'a' element that is inside 'li' with a class name of browse-by-list-item
        int i = 1;
        MyNode info;
        for(Element element : htmlPage.body().select("li.browse-by-list-item > a")) {
            info = new MyNode(element.select("button").text(),
                    element.select("a").attr("href").trim());
            topics.put(i, info);
            i++;
        }
        return topics;
    }

    // Loads Reviews
    public void loadReviews(String page, String topic) throws IOException {
        htmlPage = Jsoup.parse(page);
        // Get all review blocks
        System.out.println("**************************\n" + page + "\n**************************\n");
        for(Element element : htmlPage.body().select(".search-results-item-body")){
            reviewBlock = element;
            String review = getLink() + " | " + topic + " | " + getTitle() + " | " + getAuthor() + " | " + getDate();
            System.out.println(review);
        }
    }

    Queue<String> getLinks(){
        System.out.println("GETTING LINKS");
        Queue<String> links = new LinkedList<>();
        for(Element element : htmlPage.body().select("li.pagination-page-list-item > a")) {
            links.add(element.attr("href"));
        }
        return links;
    }

    private String getLink(){
        return "https://www.cochranelibrary.com" + reviewBlock.select("a").attr("href");
    }

    public String getTitle(){
        return reviewBlock.selectFirst("a").text();
    }

    public String getAuthor(){
        return reviewBlock.selectFirst("div.search-result-authors").text();
    }

    public String getDate(){
        String result = reviewBlock.select("div.search-result-date > div").text();
        try {
            SimpleDateFormat fmt = new SimpleDateFormat("dd MMMM yyyy", Locale.US);
            Date d = fmt.parse(result);
            fmt.applyPattern("yyyy-MM-dd");
            result = fmt.format(d);
        } catch (ParseException e) {
            System.out.println("Failed parsing the date...");
        }
        return result;
    }
}
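
As an aside, the date conversion in getDate could also be done with the thread-safe java.time API on Java 8+; a minimal sketch, assuming input dates such as "15 January 2020":

import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeParseException;
import java.util.Locale;

public class DateNormalizer {
    // Converts "15 January 2020" into the ISO form "2020-01-15"
    static String normalize(String raw) {
        DateTimeFormatter in = DateTimeFormatter.ofPattern("d MMMM yyyy", Locale.US);
        try {
            return LocalDate.parse(raw, in).toString(); // LocalDate.toString() is yyyy-MM-dd
        } catch (DateTimeParseException e) {
            return raw; // keep the original text, mirroring the ParseException fallback above
        }
    }

    public static void main(String[] args) {
        System.out.println(normalize("15 January 2020")); // prints 2020-01-15
    }
}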

2 Answers:

Answer 0 (score: 0)

If I had the reputation, this would just be a comment.
I ran the loadHTML function with the URL you provided, and the result I got was more or less the HTML of the page.

Could you give more details about the httpclient library you are using? I am on Java 12 with the dependency below (I'm fairly confident it works on Java 8 as well):

<dependency>
  <groupId>org.apache.httpcomponents</groupId>
  <artifactId>httpclient</artifactId>
  <version>4.5.11</version>
</dependency>
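
To double-check which version is actually on your classpath at runtime, you could read it from the jar manifest; a quick sketch (it prints null if the manifest carries no Implementation-Version entry):

import org.apache.http.impl.client.HttpClients;

public class VersionCheck {
    public static void main(String[] args) {
        // Reads Implementation-Version from the MANIFEST.MF of the jar providing HttpClients
        Package pkg = HttpClients.class.getPackage();
        System.out.println("httpclient version: " + pkg.getImplementationVersion());
    }
}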

Answer 1 (score: 0)

To solve the problem, I created a session backed by a cookie store and closed each response (a CloseableHttpResponse) after fetching a page. Here is the snippet that made it work:

// httpClient and httpContext are fields, so every request shares the same session
RequestConfig config = RequestConfig.custom()
        .setCircularRedirectsAllowed(true)
        .build();
httpClient = HttpClients.custom()
        .setDefaultRequestConfig(config)
        .setMaxConnPerRoute(100)
        .build();
CookieStore cookieStore = new BasicCookieStore();
httpContext = new BasicHttpContext();
httpContext.setAttribute(HttpClientContext.COOKIE_STORE, cookieStore);

HttpGet httpget = new HttpGet(url);
httpget.setHeader("User-Agent", "Whatever");
StringBuilder page = new StringBuilder();
try {
    CloseableHttpResponse response = httpClient.execute(httpget, httpContext);
    System.out.println(response.getStatusLine());
    Scanner sc = new Scanner(response.getEntity().getContent());
    while (sc.hasNextLine())
        page.append(sc.nextLine()).append(" ");
    sc.close();
    response.close();
} catch (IOException e) {
    e.printStackTrace();
}

return page.toString();
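
Put together, a self-contained fetcher along these lines might look as follows (a sketch assuming HttpClient 4.5.x; the class name SessionFetcher is illustrative):

import org.apache.http.client.CookieStore;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.impl.client.BasicCookieStore;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.protocol.BasicHttpContext;
import org.apache.http.protocol.HttpContext;
import org.apache.http.util.EntityUtils;

import java.io.IOException;

public class SessionFetcher {
    // One client and one context for the whole crawl, so cookies persist across pages
    private final CloseableHttpClient httpClient;
    private final HttpContext httpContext;

    public SessionFetcher() {
        RequestConfig config = RequestConfig.custom()
                .setCircularRedirectsAllowed(true)
                .build();
        httpClient = HttpClients.custom()
                .setDefaultRequestConfig(config)
                .setMaxConnPerRoute(100)
                .build();
        CookieStore cookieStore = new BasicCookieStore();
        httpContext = new BasicHttpContext();
        httpContext.setAttribute(HttpClientContext.COOKIE_STORE, cookieStore);
    }

    public String loadHTML(String url) throws IOException {
        HttpGet httpget = new HttpGet(url);
        httpget.setHeader("User-Agent", "Whatever");
        // try-with-resources closes the response after each fetch
        try (CloseableHttpResponse response = httpClient.execute(httpget, httpContext)) {
            return EntityUtils.toString(response.getEntity());
        }
    }

    public void close() throws IOException {
        httpClient.close();
    }
}

Because every call to loadHTML goes through the same client and context, the cookie store persists across requests, which is what makes the pagination links return content.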