gpt4 book ai didi

python - Scrapy 将子站点项与站点项合并

转载 作者:太空宇宙 更新时间:2023-11-04 10:10:17 25 4
gpt4 key购买 nike

我正在尝试从子站点抓取详细信息并与从站点抓取的详细信息合并。我一直在通过 stackoverflow 以及文档进行研究。但是,我仍然无法让我的代码工作。似乎我从子站点中提取其他详细信息的功能不起作用。如果有人能看一下,我将不胜感激。

# -*- coding: utf-8 -*-
import pyodbc
from scrapy import Request
from scrapy.selector import Selector
from scrapy.spiders import Spider

from scrapeInfo.items import infoItem, InfoItemSubSite


class scrapeInfo(Spider):
    """Spider that scrapes item details and merges extra data from a sub-site.

    Uses request chaining: ``parse`` builds a partially filled item and, when
    a sub-site link is present, forwards the item through ``Request.meta`` to
    ``parse_item_sub``, which completes and yields it.  This is the standard
    Scrapy pattern for producing one item from multiple requests.
    """

    name = "info"
    # allowed_domains must contain bare domain names, not full URLs,
    # otherwise Scrapy's offsite filtering never matches.
    allowed_domains = ["www.nevermind.com"]
    start_urls = []

    def start_requests(self):
        """Seed one request per (InfoID, category) row stored in SQL Server."""
        self.conn = pyodbc.connect(
            'DRIVER={SQL Server};SERVER=server;DATABASE=dbname;UID=user;PWD=password')
        self.cursor = self.conn.cursor()
        self.cursor.execute("SELECT InfoID, category FROM dbo.StageItem")

        for row in self.cursor.fetchall():
            info_id = row[0]
            category = row[1]
            # str() guards against a numeric InfoID column; '+' would raise
            # TypeError on int.
            url = 'http://www.nevermind.com/info/' + str(info_id)
            yield self.make_requests_from_url(url, info_id, category, self.parse)

    def make_requests_from_url(self, url, InfoID, category, callback):
        """Build a Request that carries InfoID and category in its meta dict."""
        request = Request(url, callback)
        request.meta['InfoID'] = InfoID
        request.meta['category'] = category
        return request

    def parse(self, response):
        """Extract the main-page fields; chain to the sub-site when linked.

        Yields either a Request (item travels along in meta and is finished
        by ``parse_item_sub``) or, when no sub-site link exists, the item
        itself.
        """
        hxs = Selector(response)
        infodata = hxs.xpath('div[2]/div[2]')  # input item path

        info_id = response.meta['InfoID']
        category = response.meta['category']

        for info in infodata:
            item = infoItem()
            # One instance per item; the original
            # `item_cur, item_hist = InfoItemSubSite()` tried to unpack a
            # single object into two names and raised TypeError.
            item_cur = InfoItemSubSite()

            # Stem details
            item['id'] = info_id
            item['field'] = info.xpath('tr[1]/td[2]/p/b/text()').extract()
            item['field2'] = info.xpath('tr[2]/td[2]/p/b/text()').extract()
            item['field3'] = info.xpath('tr[3]/td[2]/p/b/text()').extract()
            item_cur['field4'] = info.xpath('tr[4]/td[2]/p/b/text()').extract()
            item_cur['field5'] = info.xpath('tr[5]/td[2]/p/b/text()').extract()
            item_cur['field6'] = info.xpath('tr[6]/td[2]/p/b/@href').extract()

            href = item_cur['field6']
            if href:
                # Chain a request to the sub-site and carry the partial item
                # in meta.  The original code called self.parse_item_sub()
                # directly — callbacks must be yielded as Requests so Scrapy
                # downloads the page and supplies the response.
                request = Request(
                    'http://www.nevermind.com/info/sub/' + href[0],
                    callback=self.parse_item_sub)
                request.meta['item'] = item
                request.meta['item_cur'] = item_cur
                request.meta['category'] = category
                yield request
            else:
                # No sub-site: the item is complete as-is.
                item['subsite_dic'] = [dict(item_cur)]
                yield item

    def parse_item_sub(self, response):
        """Complete the item forwarded from ``parse`` with sub-site fields.

        Scrapy invokes callbacks as ``callback(response)``; extra data such
        as the category must travel in ``response.meta``, not as a second
        positional parameter as the original signature assumed.
        """
        item = response.meta['item']
        item_cur = response.meta['item_cur']
        category = response.meta['category']

        hxs = Selector(response)
        for node in hxs.xpath('div/div[2]'):  # input base path
            sub = InfoItemSubSite()
            # XPath layouts differ per category.
            if category == 'first':
                sub['subsite_field1'] = node.xpath('/td[2]/span/@title').extract()
                sub['subsite_field2'] = node.xpath('/tr[4]/td[2]/text()').extract()
                sub['subsite_field3'] = node.xpath('/div[5]/a[1]/@href').extract()
            else:
                sub['subsite_field1'] = node.xpath('/tr[10]/td[3]/span/@title').extract()
                sub['subsite_field2'] = node.xpath('/tr[4]/td[1]/text()').extract()
                sub['subsite_field3'] = node.xpath('/div[7]/a[1]/@href').extract()

            # Merge main-page and sub-site data onto the original item.
            item['subsite_dic'] = [dict(item_cur), dict(sub)]
            yield item

我一直在查看这些示例以及许多其他示例(stackoverflow 非常适合!),也查阅了 scrapy 文档,但仍然无法理解如何把一个回调函数中提取的详细信息与原始回调函数中抓取的项目合并。

参考问题:How do I merge results from target page to current page in scrapy? / How can I use multiple requests and pass items in between them in scrapy (python)?

最佳答案

您在这里看到的称为请求链。您的问题是 - 从多个请求中产生一个项目。一个解决方案是在请求 meta 属性中携带您的项目时链接请求。
示例:

def parse(self, response):
    """Scrape the first page, then chain a request to the detail page,
    carrying the half-built item along in the request's meta dict."""
    item = MyItem()
    item['name'] = response.xpath("//div[@id='name']/text()").extract()
    # Placeholder: some page that offers more details.  The original line
    # `more_page = # some page...` was a syntax error (comment as the RHS).
    more_page = "http://example.com/more-details"
    # go to more page and take your item with you.
    yield Request(more_page,
                  self.parse_more,
                  meta={'item': item})


def parse_more(self, response):
    """Second hop of the request chain: retrieve the partially-built item
    from the response meta, add the remaining field, and yield it."""
    partial = response.meta['item']
    last_name_sel = response.xpath("//div[@id='lastname']/text()")
    partial['last_name'] = last_name_sel.extract()
    yield partial

关于python - Scrapy 将子站点项与站点项合并,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/38753743/

25 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com