
python - Scrapy skips /browse/ pages not coming from a /browse/ referer


I'm running into a problem where my crawler skips /browse/ pages that do not have a /browse/ referer. I'm trying to parse every page whose URL contains /browse/, regardless of the referer.


Here is my code (updated per paul t's answer):

from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from wallspider.items import Website
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor

class anchorspider(CrawlSpider):
    name = "newbrowsepages"
    allowed_domains = ["mydomain.com"]
    start_urls = ["http://www.mydomain.com/"]

    rules = (
        Rule(SgmlLinkExtractor(allow=('/browse/', )),
             callback="parse_links", follow=True,
             process_links=lambda links: [link for link in links if not link.nofollow]),
        Rule(SgmlLinkExtractor(allow=(), deny=('/[1-9]$', '(bti=)[1-9]+(?:\.[1-9]*)?', '(sort_by=)[a-zA-Z]', '(sort_by=)[1-9]+(?:\.[1-9]*)?', '(ic=32_)[1-9]+(?:\.[1-9]*)?', '(ic=60_)[0-9]+(?:\.[0-9]*)?', '(search_sort=)[1-9]+(?:\.[1-9]*)?', 'browse-ng.do\?', '/page/', '/ip/', 'out\+value', 'fn=', 'customer_rating', 'special_offers', 'search_sort=&', 'facet=' ))),
    )

    def parse_start_url(self, response):
        return list(self.parse_links(response))

    def parse_links(self, response):
        hxs = HtmlXPathSelector(response)
        links = hxs.select('//a')
        domain = 'http://www.mydomain.com'
        for link in links:
            class_text = ''.join(link.select('./@class').extract())
            title = ''.join(link.select('./@title').extract())  # was './@class', which silently dropped the title
            url = ''.join(link.select('./@href').extract())
            # build a single meta dict; the second assignment used to overwrite the first
            meta = {'title': title, 'class_text': class_text}
            yield Request(domain + url, callback=self.parse_page, meta=meta)

    def parse_page(self, response):
        hxs = HtmlXPathSelector(response)
        sites = hxs.select('//html')
        for site in sites:
            item = Website()
            item['class_text'] = response.meta['class_text']
            item['url'] = response.url
            item['title'] = site.xpath('/html/head/title/text()').extract()
            item['referer'] = response.request.headers.get('Referer')
            item['canonical'] = site.xpath('//head/link[@rel="canonical"]/@href').extract()
            yield item

My console log:

2014-01-28 12:22:03-0800 [newbrowsepages] DEBUG: Crawled (200) <GET http://www.mydomain.com/ip/Ad-tech-Ultimate-Strength-Mini-8-Glue-Sticks-24-ct/17404367> (referer: http://www.mydomain.com/browse/crafts/other-arts-crafts/2637_667479_1043549/?amp;ic=48_0&amp;ref=+422937&catNavId=667479&povid=P1171-C1110.2784+1455.2776+1115.2956-L352)
2014-01-28 12:22:03-0800 [newbrowsepages] DEBUG: Crawled (200) <GET http://www.mydomain.com/ip/Wood-Scrabble-Pendant-Tiles-Rectangle-18x20mm-100/30108666> (referer: http://www.mydomain.com/browse/crafts/other-arts-crafts/2637_667479_1043549/?amp;ic=48_0&amp;ref=+422937&catNavId=667479&povid=P1171-C1110.2784+1455.2776+1115.2956-L352)
2014-01-28 12:22:03-0800 [newbrowsepages] DEBUG: Crawled (200) <GET http://www.mydomain.com/browse/apparel/women/5438_133162/> (referer: http://www.mydomain.com/browse/apparel/backpacks/5438_1045799_1045801_133211/?_refineresult=true&povid=P1171-C1110.2784+1455.2776+1115.2956-L136)
2014-01-28 12:22:03-0800 [newbrowsepages] DEBUG: Scraped from <200 http://www.mydomain.com/ip/Advantus-Paper-Holder/24575774>
{'canonical': [u'http://www.mydomain.com/ip/Advantus-Paper-Holder/24575774'],
'class_text': '',
'referer': 'http://www.mydomain.com/browse/crafts/craft-storage/2637_667479_1021741/?amp;ic=48_0&amp;ref=+420081&catNavId=667479&povid=P1171-C1110.2784+1455.2776+1115.2956-L357',
'title': [u'Advantus Paper Holder: Crafts : mydomain.com '],
'url': 'http://www.mydomain.com/ip/Advantus-Paper-Holder/24575774'}
2014-01-28 13:45:36-0800 [newbrowsepages] DEBUG: Crawled (200) <GET http://www.mydomain.com/browse/party-occasions/plants-artificial-flowers/2637_79907/?_refineresult=true&povid=P1171-C1110.2784+1455.2776+1115.2956-L355> (referer: http://www.mydomain.com/)
2014-01-28 13:45:36-0800 [newbrowsepages] DEBUG: Redirecting (301) to <GET http://www.mydomain.com/browse/crafts/craft-storage/2637_667479_1021741/?amp;ic=48_0&amp;ref=+420081&catNavId=667479&povid=P1171-C1110.2784+1455.2776+1115.2956-L357> from <GET http://www.mydomain.com/browse/_/N-904x?amp%3Bic=48_0&amp%3Bref=+420081&catNavId=667479&povid=P1171-C1110.2784+1455.2776+1115.2956-L357>
2014-01-28 13:45:37-0800 [newbrowsepages] DEBUG: Crawled (200) <GET http://www.mydomain.com/browse/party-occasions/art-supplies/2637_667479_1094401/?_refineresult=true&povid=P1171-C1110.2784+1455.2776+1115.2956-L354> (referer: http://www.mydomain.com/)

I changed the rules to:

    rules = (
        Rule(SgmlLinkExtractor(allow=()), follow=True),
        Rule(SgmlLinkExtractor(allow=('/browse/', ), deny=('/[1-9]$', '(bti=)[1-9]+(?:\.[1-9]*)?', '(sort_by=)[a-zA-Z]', '(sort_by=)[1-9]+(?:\.[1-9]*)?', '(ic=32_)[1-9]+(?:\.[1-9]*)?', '(ic=60_)[0-9]+(?:\.[0-9]*)?', '(search_sort=)[1-9]+(?:\.[1-9]*)?', 'browse-ng.do\?', '/page/', '/ip/', 'out\+value', 'fn=', 'customer_rating', 'special_offers', 'search_sort=&', 'facet=' )), callback="parse_links"),
    )

Best Answer

I see at least 3 issues here:

  • Your first rule references "parse_items", which is not defined anywhere
  • Your parse_start_url should return the list you are building
  • In parse_page, you should create item = Website() inside each loop iteration and yield item at the end of each iteration

Spider code with these fixes:

from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from wallspider.items import Website
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor

class classspider(CrawlSpider):
    name = "newbrowsepages"
    allowed_domains = ["mydomain.com"]
    start_urls = ["http://www.mydomain.com/"]

    rules = (
        Rule(SgmlLinkExtractor(allow=('/browse/', )),
             callback="parse_links",  # the callback name now matches the method defined below
             follow=True,
             process_links=lambda links: [link for link in links if not link.nofollow]),
        Rule(SgmlLinkExtractor(allow=(), deny=('/[1-9]$', '(bti=)[1-9]+(?:\.[1-9]*)?', '(sort_by=)[a-zA-Z]', '(sort_by=)[1-9]+(?:\.[1-9]*)?', '(ic=32_)[1-9]+(?:\.[1-9]*)?', '(ic=60_)[0-9]+(?:\.[0-9]*)?', '(search_sort=)[1-9]+(?:\.[1-9]*)?', 'browse-ng.do\?', '/page/', '/ip/', 'out\+value', 'fn=', 'customer_rating', 'special_offers', 'search_sort=&', 'facet=' ))),
    )

    def parse_start_url(self, response):
        return list(self.parse_links(response))

    def parse_links(self, response):
        hxs = HtmlXPathSelector(response)
        links = hxs.select('//a')
        domain = 'http://www.mydomain.com'
        for link in links:
            class_text = ''.join(link.select('./@class').extract())
            title = ''.join(link.select('./@title').extract())
            url = ''.join(link.select('./@href').extract())
            meta = {'title': title, 'class_text': class_text}
            yield Request(domain + url, callback=self.parse_page, meta=meta)

    def parse_page(self, response):
        hxs = HtmlXPathSelector(response)
        sites = hxs.select('//html')
        for site in sites:
            item = Website()
            item['class_text'] = response.meta['class_text']
            item['url'] = response.url
            item['title'] = site.xpath('/html/head/title/text()').extract()
            item['referer'] = response.request.headers.get('Referer')
            item['canonical'] = site.xpath('//head/link[@rel="canonical"]/@href').extract()
            yield item
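As a side note, SgmlLinkExtractor and HtmlXPathSelector belong to older Scrapy releases and were later deprecated; on a newer Scrapy the same spider could be expressed with scrapy.linkextractors.LinkExtractor and response.xpath(). The following is only a rough sketch under that assumption, collapsing link extraction and page parsing into the rule's callback:

from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from wallspider.items import Website

class NewBrowseSpider(CrawlSpider):
    # Rough sketch for a newer Scrapy; the class name is illustrative and
    # class_text is omitted since per-link metadata is not carried over here.
    name = "newbrowsepages"
    allowed_domains = ["mydomain.com"]
    start_urls = ["http://www.mydomain.com/"]

    rules = (
        # Follow and parse every URL containing /browse/, whatever the referer.
        Rule(LinkExtractor(allow=('/browse/',)), callback="parse_page", follow=True),
    )

    def parse_page(self, response):
        item = Website()
        item['url'] = response.url
        item['title'] = response.xpath('/html/head/title/text()').extract()
        item['referer'] = response.request.headers.get('Referer')
        item['canonical'] = response.xpath('//head/link[@rel="canonical"]/@href').extract()
        yield item

Either version can be run and inspected with "scrapy crawl newbrowsepages -o browse.json", which makes it easy to check the referer field on the scraped /browse/ items.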

Regarding python - Scrapy skips /browse/ pages not coming from a /browse/ referer, a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/21416254/
