gpt4 book ai didi

java - Apache HttpClient 不使用分页链接获取页面内容。我得到 200 状态,但 html 没有内容

转载 作者:行者123 更新时间:2023-12-01 18:33:08 25 4
gpt4 key购买 nike

我正在尝试使用 Apache HttpClient 对内容页面进行网络爬网。使用分页链接请求下一页时,我得到状态 200,但 HTML 在正文中显示 500,没有内容。 Postman 工作正常,即使使用分页链接也能获取内容。

主类

// Entry point: downloads the Cochrane Library topics page, lets the user pick a
// topic by number, then crawls that topic's review pages via pagination links.
public static void main(String[] args) {
String url = "https://www.cochranelibrary.com/cdsr/reviews/topics";
MyContentFetcher myContentFetcher = new MyContentFetcher();
MyParser myParser = new MyParser();
try {
// Load Topic list page
String topicsPage = myContentFetcher.loadHTML(url);

// Getting all the topics.
Map<Integer, MyNode> topics = myParser.getTopicList(topicsPage);

// Print all the topics and ask user to choose one
for (int id : topics.keySet())
System.out.println("-> " + id + " <- " + topics.get(id).getTopic());
System.out.println("********************");
System.out.print("Enter ID number from the list above to get reviews or enter anything else to exit:\n");
BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));
String id = reader.readLine();

// Validate user input, get the link and topic and cout the choice.
if (isNumber(id)) {
int idNum = Integer.parseInt(id);
// Guard: valid ids are 1..topics.size(), matching the keys assigned above.
if (idNum <= topics.size() && idNum > 0) {
String topic = topics.get(idNum).getTopic();
String link = topics.get(idNum).getLink();
System.out.println("You picked: " + topic + link + "\n***************************");
// Loading first page of reviews
myParser.loadReviews(myContentFetcher.loadHTML(link), topic);
// Getting links to other pages
Queue<String> paginationLinks = myParser.getLinks();

// --------------> WORKS FINE UNTIL HERE <--------------
// Problem starts here....
// Load list of reviews for chosen topic
// NOTE(review): every loadHTML call builds a brand-new HttpClient, so any
// session cookies set by the first request are discarded before these
// pagination fetches run — likely why the site returns a contentless body
// here while Postman (which retains cookies between calls) succeeds.
while(!paginationLinks.isEmpty()) {
String page = myContentFetcher.loadHTML(paginationLinks.remove());
myParser.loadReviews(page, topic);
}
}
}
System.out.println("Exiting...");

} catch (IOException e) {
// NOTE(review): the exception is discarded; log e (or its message) to aid diagnosis.
System.out.println("There was a problem...");
}

!!!这是获取 HTML 的类。我可能在这里做错了什么......

import org.apache.http.client.config.CookieSpecs;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;

import java.io.IOException;

import java.util.Scanner;

/**
 * Fetches raw HTML over HTTP for the crawler.
 *
 * <p>A single {@link CloseableHttpClient} is shared across requests so that
 * session cookies set by the first response are replayed on subsequent
 * (pagination) requests. The previous version built a fresh client per call,
 * dropping those cookies — the likely reason paginated fetches returned a
 * contentless body while Postman (which keeps cookies) worked.
 * NOTE(review): confirm against the target site's session behavior.
 */
public class MyContentFetcher {

// Reused client: keeps the cookie store alive between loadHTML calls.
private final CloseableHttpClient httpClient;

public MyContentFetcher() {
RequestConfig config = RequestConfig.custom()
.setCircularRedirectsAllowed(true)
.setCookieSpec(CookieSpecs.STANDARD)
.build();
httpClient = HttpClients.custom()
.setDefaultRequestConfig(config)
.build();
}

/**
 * Downloads the page at {@code url} and returns its markup collapsed onto
 * one line (each newline replaced by a single space).
 *
 * @param url absolute URL to fetch
 * @return the response body, or an empty string when the body is empty
 * @throws IOException on connection or protocol failure
 */
String loadHTML(String url) throws IOException {
HttpGet httpget = new HttpGet(url);
httpget.setHeader("User-Agent", "Mozilla/5.0 (Linux; Android 8.1.0; Pixel Build/OPM4.171019.021.D1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.109 Mobile Safari/537.36 EdgA/42.0.0.2057");
// try-with-resources: the response is always released, even if reading throws
// (the previous version leaked both response and client on an exception).
try (CloseableHttpResponse httpResponse = httpClient.execute(httpget)) {
StringBuilder page = new StringBuilder();
// Explicit charset instead of the platform default; hasNextLine() (not
// hasNext()) so a final whitespace-only line is not silently dropped.
Scanner sc = new Scanner(httpResponse.getEntity().getContent(), "UTF-8");
while (sc.hasNextLine())
page.append(sc.nextLine()).append(" ");
return page.toString();
}
}

/** Releases the underlying HTTP client. Call once crawling is finished. */
public void close() throws IOException {
httpClient.close();
}
}

这是解析器。解析器没有任何问题(解析完全正确并且根据需要)

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

import java.io.IOException;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.*;

public class MyParser {

// Never read or written in this snippet — candidate for removal (TODO confirm no other use).
private Map<String, String> topics;
// Most recently parsed document; written by getTopicList/loadReviews, read by getLinks.
private Document htmlPage;
// Review block currently being processed by loadReviews; read by the getters below.
private Element reviewBlock;

public MyParser(){}

// Loads all topics from the Cochrane Library into a map -> (Topic Name, Link)
// Parses the topics page and returns a map from a 1-based index to each
// topic's (name, link) pair, in document order.
public Map<Integer, MyNode> getTopicList(String page) {
Map<Integer, MyNode> result = new HashMap<Integer, MyNode>();
htmlPage = Jsoup.parse(page);
int index = 0;
// Each topic is the anchor directly inside an 'li' of class browse-by-list-item;
// its button holds the display name and the anchor's href holds the link.
for (Element anchor : htmlPage.body().select("li.browse-by-list-item > a")) {
index++;
MyNode entry = new MyNode(anchor.select("button").text(),
anchor.select("a").attr("href").trim());
result.put(index, entry);
}
return result;
}

// Loads Reviews
public void loadReviews(String page, String topic) throws IOException {
htmlPage = Jsoup.parse(page);
// Get all review blocks
System.out.println("**************************\n" + page + "\n**************************\n");
for(Element element : htmlPage.body().select(".search-results-item-body")){
reviewBlock = element;
String review = getLink() + " | " + topic + " | " + getTitle() + " | " + getAuthor() + " | " + getDate();
System.out.println(review);
}
}

// Collects the hrefs of all pagination links from the most recently parsed
// page, preserving document order.
Queue<String> getLinks(){
System.out.println("GETTING LINKS");
Queue<String> pageLinks = new LinkedList<>();
for (Element anchor : htmlPage.body().select("li.pagination-page-list-item > a"))
pageLinks.add(anchor.attr("href"));
return pageLinks;
}

// Builds the absolute URL for the current review (the scraped href is site-relative).
private String getLink(){
String relativeHref = reviewBlock.select("a").attr("href");
return "https://www.cochranelibrary.com" + relativeHref;
}

// The review title is the text of the first anchor in the current review block.
public String getTitle(){
Element titleAnchor = reviewBlock.selectFirst("a");
return titleAnchor.text();
}

// The author line lives in the first 'div.search-result-authors' of the block.
public String getAuthor(){
Element authorsDiv = reviewBlock.selectFirst("div.search-result-authors");
return authorsDiv.text();
}

// Extracts the review date and normalizes "dd MMMM yyyy" (US locale) to ISO
// "yyyy-MM-dd"; on a parse failure the raw scraped text is returned unchanged.
public String getDate(){
String scraped = reviewBlock.select("div.search-result-date > div").text();
SimpleDateFormat fmt = new SimpleDateFormat("dd MMMM yyyy", Locale.US);
try {
Date parsed = fmt.parse(scraped);
fmt.applyPattern("yyyy-MM-dd");
return fmt.format(parsed);
} catch (ParseException e) {
System.out.println("Failed parsing the date...");
return scraped;
}
}

最佳答案

如果我有权限,这将只是一条评论。
我使用您提供的 URL 运行了您的 loadHTML 函数,我得到的结果或多或少等于页面的 html。

您能否提供有关 httpclient 库的更多详细信息?我正在使用具有此依赖性的 Java 12(我确信它也可以与 Java 8 一起使用)

<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>4.5.11</version>
</dependency>

关于java - Apache HttpClient 不使用分页链接获取页面内容。我得到 200 状态,但 html 没有内容,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/60125005/

25 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com