
python - How do I extract links from a website with pagination? (using Selenium)


I want to extract links from the following site, but it uses pagination. Specifically, I want to extract the link under each MORE INFO button:

I am using the following snippet:

import time
import requests
import csv
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
import re


browser = webdriver.Chrome()
time.sleep(5)
browser.get('https://www.usta.com/en/home/play/facility-listing.html?searchTerm=&distance=5000000000&address=Palo%20Alto,%20%20CA')
wait = WebDriverWait(browser, 15)

def extract_data(browser):
    links = browser.find_elements_by_xpath("//div[@class='seeMoreBtn']/a")
    return [link.get_attribute('href') for link in links]


element = WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.XPATH, "//a[@class='glyphicon glyphicon-chevron-right']")))
max_pages = int(re.search(r'\d+ de (\d+)', element.text).group(1), re.UNICODE)
# extract from the current (1st) page
print("Page 1")
print(extract_data(browser))

for page in range(2, max_pages + 1):
    print("Page %d" % page)
    next_page = browser.find_element_by_xpath("//a[@class='glyphicon glyphicon-chevron-right']").click()
    print(extract_data(browser))
    print("-----")

When I run the script above, I get this error (I'm not very familiar with regular expressions and am just exploring the concept):

Traceback (most recent call last):
File "E:/Python/CSV/testingtesting.py", line 29, in <module>
max_pages = int(re.search(r'\d+ de (\d+)', element.text).group(1), re.UNICODE)
AttributeError: 'NoneType' object has no attribute 'group'
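
As background: re.search returns None when the pattern finds no match, and calling .group(1) on None raises exactly this AttributeError. The chevron link matched by that XPath is an icon, so its element.text is most likely an empty string that the pattern r'\d+ de (\d+)' cannot match. A minimal sketch of the failure and a guarded version (the sample strings here are hypothetical):

import re

text = ""                                  # an icon-only link usually has empty text
print(re.search(r'\d+ de (\d+)', text))    # None -> .group(1) raises AttributeError

text = "1 de 99"                           # hypothetical counter text the pattern expects
match = re.search(r'\d+ de (\d+)', text, re.UNICODE)  # flags belong here...
if match:                                  # ...and guard against a failed match
    max_pages = int(match.group(1))        # int()'s 2nd argument is a number base,
    print(max_pages)                       # so passing re.UNICODE there is a bug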

If possible, please suggest a solution. I did manage to extract the links by waiting and clicking through the pagination links, but the fixed sleeps (3 + 3 + 1 + 6 seconds) add almost 13 seconds of waiting per page. The working code is below:

import time
import requests
import csv
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
import re


# ----------------------------------------------HANDLING-SELENIUM-STUFF-------------------------------------------------
linkList = []
driver = webdriver.Chrome()
time.sleep(5)
driver.get('https://www.usta.com/en/home/play/facility-listing.html?searchTerm=&distance=5000000000&address=Palo%20Alto,%20%20CA')
wait = WebDriverWait(driver, 8)
time.sleep(7)

for i in range(1, 2925):
    time.sleep(3)
    # wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "//div[@class='seeMoreBtn']/a")))
    links = driver.find_elements_by_xpath("//div[@class='seeMoreBtn']/a")
    # print(links.text)
    time.sleep(3)

    # appending extracted links to the list
    for link in links:
        value = link.get_attribute("href")
        # linkList.append(value)
        with open('test.csv', 'a', encoding='utf-8', newline='') as fp:
            writer = csv.writer(fp, delimiter=',')
            writer.writerow([value])
        # print(i, " ")
    time.sleep(1)
    driver.find_element_by_xpath("//a[@class='glyphicon glyphicon-chevron-right']").click()
    time.sleep(6)

Best Answer

Try the code below to get the required data without the extra "sleep" calls:

import requests
import csv
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException


# ----------------------------------------------HANDLING-SELENIUM-STUFF-------------------------------------------------
driver = webdriver.Chrome()
driver.get('https://www.usta.com/en/home/play/facility-listing.html?searchTerm=&distance=5000000000&address=Palo%20Alto,%20%20CA')
wait = WebDriverWait(driver, 8)

links = []

while True:
    # wait until the "MORE INFO" links on the current page are visible
    new_links = wait.until(EC.visibility_of_all_elements_located((By.LINK_TEXT, "MORE INFO")))
    links.extend([link.get_attribute("href") for link in new_links])

    try:
        # click "Next page" if it is still clickable
        next_button = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "li[title='Next page']>a")))
        next_button.click()
    except TimeoutException:
        # no clickable "Next page" button means we are on the last page
        break
    # wait until the old links go stale, i.e. the next page has rendered
    wait.until(EC.staleness_of(new_links[-1]))

# Do whatever you need with links
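
Since the original goal was a CSV of links, a minimal sketch of saving the collected hrefs once the loop ends (the filename links.csv is arbitrary):

import csv

with open('links.csv', 'w', encoding='utf-8', newline='') as fp:
    writer = csv.writer(fp)
    writer.writerows([link] for link in links)   # one href per row

driver.quit()   # close the browser once the data is saved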

Regarding "python - How do I extract links from a website with pagination? (using Selenium)", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/51122020/
