gpt4 book ai didi

python - Scrapy - 在一个 scrapy 脚本中抓取不同的网页

转载 作者:太空宇宙 更新时间:2023-11-03 10:54:59 30 4
gpt4 key购买 nike

我正在创建一个网络应用程序,用于从不同网站抓取一长串鞋子。这是我的两个单独的 scrapy 脚本:

http://store.nike.com/us/en_us/pw/mens-clearance-soccer-shoes/47Z7puZ896Zoi3

from scrapy import Spider
from scrapy.http import Request
class ShoesSpider(Spider):
    """Crawl Nike's men's clearance soccer-shoe listing.

    Follows every product link found on the listing page and yields one
    item per shoe with its URL, name, price (currency symbol stripped),
    in-stock sizes, and shoe type.
    """

    name = "shoes"
    allowed_domains = ["store.nike.com"]
    start_urls = ['http://store.nike.com/us/en_us/pw/mens-clearance-soccer-shoes/47Z7puZ896Zoi3']

    def parse(self, response):
        """Collect product-page links from the listing and follow each one."""
        shoes = response.xpath('//*[@class="grid-item-image-wrapper sprite-sheet sprite-index-0"]/a/@href').extract()
        for shoe in shoes:
            yield Request(shoe, callback=self.parse_shoes)

    def parse_shoes(self, response):
        """Extract name, price, type and available sizes from one product page."""
        url = response.url
        name = response.xpath('//*[@itemprop="name"]/text()').extract_first()
        price = response.xpath('//*[@itemprop="price"]/text()').extract_first()
        # extract_first() returns None when the node is absent; guard so a
        # page without a price node does not abort the crawl with
        # AttributeError on .replace().
        if price is not None:
            price = price.replace('$', '')
        shoe_type = response.css('.exp-product-subtitle::text').extract_first()

        # Keep only in-stock sizes: skip <option> elements carrying the
        # "not-in-stock" class.
        sizes = response.xpath('//*[@class="nsg-form--drop-down exp-pdp-size-dropdown exp-pdp-dropdown two-column-dropdown"]/option')
        sizes = sizes.xpath('text()[not(parent::option/@class="exp-pdp-size-not-in-stock selectBox-disabled")]').extract()
        sizes = [s.strip() for s in sizes]
        yield {
            'url': url,
            'name': name,
            'price': price,
            'sizes': sizes,
            'shoe_type': shoe_type
        }

http://www.dickssportinggoods.com/products/clearance-soccer-cleats.jsp

    from scrapy import Spider
from scrapy.http import Request
class ShoesSpider(Spider):
    """Crawl Dick's Sporting Goods' clearance soccer-cleat listing.

    Follows every product link found on the listing page and yields one
    item per shoe with its URL, name, price, and available sizes
    (shoe_type is not present on this site and is left empty).
    """

    name = "shoes"
    allowed_domains = ["dickssportinggoods.com"]
    start_urls = ['http://www.dickssportinggoods.com/products/clearance-soccer-cleats.jsp']

    def parse(self, response):
        """Collect product-page links from the listing and follow each one."""
        shoes = response.xpath('//*[@class="fplpTitle header4"]/a/@href').extract()
        for shoe in shoes:
            yield Request(shoe, callback=self.parse_shoes)

    def parse_shoes(self, response):
        """Extract name, price and size swatches from one product page."""
        sizes = response.xpath('//*[@class="swatches clearfix"]/input/@value').extract()
        # NOTE(review): the original had `if sizes == []: pass`, a no-op —
        # presumably it was meant to skip items with no sizes. The dead
        # branch is removed; items are still yielded unconditionally to
        # preserve the original behavior.
        url = response.url
        name = response.xpath('.//*[@id="PageHeading_3074457345618261107"]/h1/text()').extract_first()
        price = response.xpath('.//*[@itemprop="price"]/text()').extract_first()
        yield {
            'url': url,
            'name': name,
            'price': price,
            'sizes': sizes,
            'shoe_type': ''
        }

我怎样才能把它们放在一起?我已经浏览了 scrapy 文档,但我没有看到他们提到这一点,它只是提到了如何从根地址中抓取两个地址。谢谢

最佳答案

将您的两个域放在 allowed_domains 中,将您的两个 URL 放在 start_urls 中,然后使用简单的 if-else 来确定要执行的代码部分。

from scrapy import Spider
from scrapy.http import Request
class ShoesSpider(Spider):
    """Crawl both the Nike and Dick's Sporting Goods clearance listings.

    Both start URLs share the same callbacks; a per-domain branch on
    ``response.url`` selects the site-specific selectors. Every item
    carries url, name, price, sizes and shoe_type (empty string for
    Dick's, which has no subtitle element).
    """

    name = "shoes"
    allowed_domains = ["store.nike.com", "dickssportinggoods.com"]
    start_urls = ['http://store.nike.com/us/en_us/pw/mens-clearance-soccer-shoes/47Z7puZ896Zoi3', 'http://www.dickssportinggoods.com/products/clearance-soccer-cleats.jsp']

    def parse(self, response):
        """Collect product links with the selector matching the current site."""
        if "store.nike.com" in response.url:
            shoes = response.xpath('//*[@class="grid-item-image-wrapper sprite-sheet sprite-index-0"]/a/@href').extract()
        elif "dickssportinggoods.com" in response.url:
            shoes = response.xpath('//*[@class="fplpTitle header4"]/a/@href').extract()
        else:
            # Unexpected domain (should not happen with allowed_domains set).
            shoes = []

        for shoe in shoes:
            yield Request(shoe, callback=self.parse_shoes)

    def parse_shoes(self, response):
        """Extract one product item, branching on the product page's domain."""
        url = response.url

        if "store.nike.com" in response.url:
            name = response.xpath('//*[@itemprop="name"]/text()').extract_first()
            price = response.xpath('//*[@itemprop="price"]/text()').extract_first()
            # extract_first() may return None; guard before stripping the
            # currency symbol so one odd page does not abort the crawl.
            if price is not None:
                price = price.replace('$', '')
            shoe_type = response.css('.exp-product-subtitle::text').extract_first()

            # Keep only in-stock sizes (skip "not-in-stock" <option>s).
            sizes = response.xpath('//*[@class="nsg-form--drop-down exp-pdp-size-dropdown exp-pdp-dropdown two-column-dropdown"]/option')
            sizes = sizes.xpath('text()[not(parent::option/@class="exp-pdp-size-not-in-stock selectBox-disabled")]').extract()
            sizes = [s.strip() for s in sizes]
            yield {
                'url': url,
                'name': name,
                'price': price,
                'sizes': sizes,
                'shoe_type': shoe_type
            }
        elif "dickssportinggoods.com" in response.url:
            sizes = response.xpath('//*[@class="swatches clearfix"]/input/@value').extract()
            # NOTE(review): the original `if sizes == []: pass` was a no-op
            # (presumably intended to skip size-less items); removed as dead
            # code — items are still always yielded, preserving behavior.
            name = response.xpath('.//*[@id="PageHeading_3074457345618261107"]/h1/text()').extract_first()
            price = response.xpath('.//*[@itemprop="price"]/text()').extract_first()

            yield {
                'url': url,
                'name': name,
                'price': price,
                'sizes': sizes,
                'shoe_type': ''
            }

关于python - Scrapy - 在一个 scrapy 脚本中抓取不同的网页,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/42637563/

30 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com