
python - Selenium Web Scrape - Why does this script return 500k rows?


I wrote a script to scrape a website for all product information in a specific category, but my code returns more than 500,000 rows even though that category only contains about 3,000 items.

I am also very new to Python, so any help is much appreciated.

The code is attached below:

# -*- coding: utf-8 -*-
"""
Created on Mon Feb 4 20:31:23 2019

@author:
"""
import requests
from selenium import webdriver
from selenium.webdriver.common.by import By
import selenium.webdriver.support.ui as ui
import selenium.webdriver.support.expected_conditions as EC
from bs4 import BeautifulSoup
import os, sys
import time
from urllib.parse import urljoin
import pandas as pd
import re
import numpy as np

# base set up

options = webdriver.ChromeOptions()
options.add_argument('--ignore-certificate-errors')
options.add_argument('--ignore-ssl-errors')
os.chdir("C:/Users/user/desktop/scripts/python")
cwd = os.getcwd()
main_dir = os.path.abspath(os.path.join(cwd, os.pardir))
print('Main Directory:', main_dir)

chromedriver = ("C:/Users/user/desktop/scripts/python/chromedriver.exe")
os.environ["webdriver.chrome.driver"] = chromedriver
# browser = webdriver.Chrome(options=options, executable_path=chromedriver)

mainurl = "https://www.bunnings.com.au/our-range"

headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'}
page = requests.get(mainurl, headers=headers)
soup = BeautifulSoup(page.content, 'html.parser')

# script start

subcat = []
for item in soup.findAll('ul', attrs={'class': 'chalkboard-menu'}):
    links = item.find_all('a')
    for link in links:
        subcat.append(urljoin(mainurl, link.get("href")))
subcat

result = pd.DataFrame()
for adrs in subcat[0:1]:
    # headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'}
    # page = requests.get(adrs, headers=headers)
    # soup = BeautifulSoup(page.content, 'html.parser')
    # pagelink = adrs
    # adrs="https://www.bunnings.com.au/our-range/storage-cleaning/cleaning/brushware-mops/indoor-brooms"
    catProd = pd.DataFrame()
    url = adrs
    browser = webdriver.Chrome(options=options, executable_path=chromedriver)
    browser.get(url)

    lenOfPage = browser.execute_script("window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;")
    match = False
    while (match == False):
        lastCount = lenOfPage
        time.sleep(3)
        lenOfPage = browser.execute_script("window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;")
        if lastCount == lenOfPage:
            match = True
    reached = False
    while (reached == False):
        try:
            browser.find_element_by_css_selector('#MoreProductsButton > span').click()
            lenOfPage = browser.execute_script("window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;")
            match = True
            while (match == True):
                lastCount = lenOfPage
                time.sleep(3)
                lenOfPage = browser.execute_script("window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;")
                if lastCount == lenOfPage:
                    match = True
                    browser.find_element_by_css_selector('#content-layout_inside-anchor > div.search-result__content > div > div > section > div:nth-child(4) > div > div:nth-child(2) > div > button > div.view-more_btn_text').click()
        except:
            reached = True
    # grab the items
    page = browser.page_source
    soup = BeautifulSoup(page, 'html.parser')
    browser.close()

    for article in soup.findAll('article', attrs={'class':'product-list__item hproduct special-order-product'}):
        for product in article.findAll('img', attrs={'class': 'photo'}):
            pName = product['alt']
            pCat = adrs
            pID = article['data-product-id']
            temp = pd.DataFrame({'proID':[pID],'Product':[pName],'Category':[pCat]})
            catProd = catProd.append(temp)
            result = result.append(catProd)
    time.sleep(3)
result.head()

#writes to CSV
writer = pd.ExcelWriter('test123123.xlsx')
result.to_excel(writer,'Sheet1')
writer.save()

The code also takes about 20 minutes to iterate over the ~3,000 items, which seems crazy to me, but the main problem remains: I get far too many duplicates and over 500,000 rows when I only need about 3,500 for that particular category.

Best answer

The problem is here:

for product in article.findAll('img', attrs={'class': 'photo'}):
    pName = product['alt']
    pCat = adrs
    pID = article['data-product-id']
    temp = pd.DataFrame({'proID':[pID],'Product':[pName],'Category':[pCat]}) #<-------------- temp DataFrame
    catProd = catProd.append(temp) #<------------ temp appending into catProd dataframe
    result = result.append(catProd) #<----------- catProd appending into result DataFrame

You are essentially doing a double append: the temp DataFrame is appended into your catProd DataFrame, and then, immediately afterwards, the whole catProd is appended into your result DataFrame. Because catProd keeps growing on every iteration, result grows roughly quadratically with the number of products instead of linearly.

There are a couple of ways to fix this. One is to move result = result.append(catProd) out of the inner loop, so that catProd is only appended to result once it has been fully built for the current category (see the sketch below). The other is to drop catProd entirely and just keep appending directly to result.
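
For illustration, a minimal sketch of that first option, assuming the same soup, adrs, catProd and result variables you already have inside the category loop, would look like this:

for article in soup.findAll('article', attrs={'class': 'product-list__item hproduct special-order-product'}):
    for product in article.findAll('img', attrs={'class': 'photo'}):
        temp = pd.DataFrame({'proID': [article['data-product-id']],
                             'Product': [product['alt']],
                             'Category': [adrs]})
        catProd = catProd.append(temp)  # build up the current category only
result = result.append(catProd)  # append the finished category once, outside the product loop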

I also cleaned up a few things, i.e. resetting the DataFrame index and not writing the index to the Excel file. And I replaced the time.sleep calls with explicit waits (i.e. waiting for the button to show up), which should speed things up.
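
For reference, the explicit-wait pattern looks roughly like the snippet below. This is a minimal sketch, assuming the same browser instance and the #MoreProductsButton selector from your page; element_to_be_clickable is a slightly stricter variant of the visibility check used in the full code.

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
import selenium.webdriver.support.expected_conditions as EC

# Wait up to 10 seconds for the "load more" button to become clickable, then click it.
# Raises TimeoutException if the button never appears, so wrap it in try/except if needed.
wait = WebDriverWait(browser, 10)
more_button = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#MoreProductsButton')))
more_button.click()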

The full code is below. Don't forget to change for adrs in subcat[0:1] so that it goes through the entire list; I only had it go through the first URL.

One last thing: I added a simple timing mechanism. Running only the first URL, scraping 895 products and saving the output took: Duration: 0 Hours, 02 Minutes, 48 Seconds.

Finally, I had to comment out a few things (like the os.chdir call) so that I could run it on my machine, so don't forget to uncomment those.

import requests
from selenium import webdriver
from selenium.webdriver.common.by import By
import selenium.webdriver.support.ui as ui
from selenium.webdriver.support.ui import WebDriverWait
import selenium.webdriver.support.expected_conditions as EC
from bs4 import BeautifulSoup
import os, sys
import time
from urllib.parse import urljoin
import pandas as pd
import re
import numpy as np
import datetime

# base set up
start_time = datetime.datetime.now()


options = webdriver.ChromeOptions()
options.add_argument('--ignore-certificate-errors')
options.add_argument('--ignore-ssl-errors')
#os.chdir("C:/Users/user/desktop/scripts/python")
#cwd = os.getcwd()
#main_dir = os.path.abspath(os.path.join(cwd, os.pardir))
#print('Main Directory:', main_dir)

chromedriver = ("C:/chromedriver_win32/chromedriver.exe")
os.environ["webdriver.chrome.driver"] = chromedriver
# browser = webdriver.Chrome(options=options, executable_path=chromedriver)

mainurl = "https://www.bunnings.com.au/our-range"

headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'}
page = requests.get(mainurl, headers=headers)
soup = BeautifulSoup(page.content, 'html.parser')

# script start

subcat = []
for item in soup.findAll('ul', attrs={'class': 'chalkboard-menu'}):
    links = item.find_all('a')
    for link in links:
        subcat.append(urljoin(mainurl, link.get("href")))
subcat

result = pd.DataFrame()
for adrs in subcat:
    # headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'}
    # page = requests.get(adrs, headers=headers)
    # soup = BeautifulSoup(page.content, 'html.parser')
    # pagelink = adrs
    # adrs="https://www.bunnings.com.au/our-range/storage-cleaning/cleaning/brushware-mops/indoor-brooms"
    catProd = pd.DataFrame()
    url = adrs
    browser = webdriver.Chrome(options=options, executable_path=chromedriver)
    browser.get(url)

    lenOfPage = browser.execute_script("window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;")
    match = False
    while (match == False):
        lastCount = lenOfPage
        #time.sleep(3)
        lenOfPage = browser.execute_script("window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;")
        if lastCount == lenOfPage:
            match = True
    reached = False
    while (reached == False):
        try:
            wait = WebDriverWait(browser, 10)
            wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#MoreProductsButton")))

            browser.find_element_by_css_selector('#MoreProductsButton').click()
            lenOfPage = browser.execute_script("window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;")
            match = True
            while (match == True):
                lastCount = lenOfPage
                #time.sleep(3)
                lenOfPage = browser.execute_script("window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;")
                if lastCount == lenOfPage:
                    match = True
                    #time.sleep(3)
                    wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.view-more_btn_text")))
                    browser.find_element_by_css_selector('#content-layout_inside-anchor > div.search-result__content > div > div > section > div:nth-child(4) > div > div:nth-child(2) > div > button > div.view-more_btn_text').click()
        except:
            reached = True
    # grab the items
    page = browser.page_source
    soup = BeautifulSoup(page, 'html.parser')
    browser.close()

    for article in soup.findAll('article', attrs={'class':'product-list__item hproduct special-order-product'}):
        for product in article.findAll('img', attrs={'class': 'photo'}):
            pName = product['alt']
            pCat = adrs
            pID = article['data-product-id']
            temp = pd.DataFrame({'proID':[pID],'Product':[pName],'Category':[pCat]})
            #catProd=catProd.append(temp)
            result = result.append(temp)
    #time.sleep(3)
result.head()

result.reset_index(drop=True)

#writes to CSV
writer = pd.ExcelWriter('C:/test123123.xlsx')
result.to_excel(writer,'Sheet1', index=False)
writer.save()

finish_time = datetime.datetime.now()
duration = finish_time - start_time

dur_list = str(duration).split(':')
hour = dur_list[0]
minutes = dur_list[1]
seconds = dur_list[2].split('.')[0]

print ('Duration: %s Hours, %s Minutes, %s Seconds' %(hour, minutes, seconds))
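
As a side note on speed and the duplicates mentioned in the question: appending to a DataFrame one row at a time is slow, and DataFrame.append has been removed in pandas 2.x altogether. A sketch of a more future-proof variant is to collect plain dicts and build the frame once, then drop any remaining duplicates; the rows list and the proID/Product/Category column names below are carried over from the loop above and would need to be adapted to your own code.

rows = []
for article in soup.findAll('article', attrs={'class': 'product-list__item hproduct special-order-product'}):
    for product in article.findAll('img', attrs={'class': 'photo'}):
        rows.append({'proID': article['data-product-id'],
                     'Product': product['alt'],
                     'Category': adrs})

# Build the DataFrame once, then drop duplicate product/category pairs.
result = pd.DataFrame(rows, columns=['proID', 'Product', 'Category'])
result = result.drop_duplicates(subset=['proID', 'Category']).reset_index(drop=True)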

Regarding "python - Selenium Web Scrape - Why does this script return 500k rows?", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/54564691/
