Distributed crawling in Scrapy with scrapy-redis

Posted by 我盤程式碼養你 on 2020-12-24

What scrapy_redis does

scrapy_redis adds more, and more powerful, functionality on top of Scrapy. Concretely,

by persisting the request queue and the set of request fingerprints in Redis it provides:

  • resumable crawling (an interrupted crawl picks up where it left off)
  • fast, distributed crawling across multiple workers
    Other conceptual background you can look up yourself; here we only cover how to turn an ordinary spider into a distributed one. The Redis structures behind this are sketched just below.
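
To see what actually gets persisted, here is a minimal redis-py sketch (assuming redis-py is installed and Redis runs locally; 'marrige' is the spider defined further down). The key names are the scrapy-redis defaults: <spider>:requests is the scheduler queue, <spider>:dupefilter the fingerprint set, and <spider>:items what RedisPipeline writes:

import redis  # pip install redis

r = redis.Redis(host='127.0.0.1', port=6379)
for key in ('marrige:requests', 'marrige:dupefilter', 'marrige:items'):
    # prints each key's Redis type once the spider has been running for a while
    print(key, r.type(key), r.exists(key))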

    Step 1: import the distributed spider class (copy it from the official example)
    Step 2: inherit from the distributed spider class (copy it if you can't remember it)
    Step 3: comment out the start URLs and the allowed domains
    Step 4: set the redis_key (any name will do, or follow the official example)
    Step 5: set up __init__ (copy the official example)
    (a skeleton combining all five changes is sketched right after this list)
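
Put together, the five changes look roughly like this (a minimal sketch with placeholder names, not a complete project):

from scrapy_redis.spiders import RedisSpider          # Step 1: import the distributed spider class


class MySpider(RedisSpider):                           # Step 2: inherit from it
    name = 'myspider'
    # allowed_domains = ['example.com']                # Step 3: comment out start URLs and allowed domains
    # start_urls = ['http://example.com/']

    redis_key = 'myspider:start_urls'                  # Step 4: the Redis list the spider pops start URLs from

    # Step 5: set the allowed domains dynamically, e.g. scrapy crawl myspider -a domain=example.com
    def __init__(self, *args, **kwargs):
        domain = kwargs.pop('domain', '')
        self.allowed_domains = list(filter(None, domain.split(',')))
        super(MySpider, self).__init__(*args, **kwargs)

    def parse(self, response):
        pass  # the parsing logic itself does not change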

    Depending on the pages we crawled before, we mainly wrote CrawlSpider-based and plain Spider-based crawlers; below, both kinds are converted into distributed crawlers.

    First, clone the official example from GitHub:
    git clone https://github.com/rolando/scrapy-redis.git

Converting a CrawlSpider

Goal: crawl member information from youyuan.com (有緣網).
The converted spider looks like this:

from scrapy.spiders import CrawlSpider, Rule
from youyuanwang.items import YouyuanwangItem
from scrapy.linkextractors import LinkExtractor
# Step 1: import the class we need
from scrapy_redis.spiders import RedisCrawlSpider


# Step 2: inherit from the distributed spider class
class MarrigeSpider(RedisCrawlSpider):
    name = 'marrige'
    # Step 3: comment out the start URLs and the allowed domains
    # allowed_domains = ['youyuan.com']
    # start_urls = ['http://www.youyuan.com/find/xian/mm18-0/advance-0-0-0-0-0-0-0/p1/']

    # Step 4: set the redis_key
    redis_key = 'guoshaosong'

    rules = (
        Rule(LinkExtractor(allow=r'^.*youyuan.*xian.*'), callback='parse_item', follow=True),
    )

    # Step 5: set the allowed domains dynamically via __init__
    def __init__(self, *args, **kwargs):
        # Dynamically define the allowed domains list, e.g. scrapy crawl marrige -a domain=youyuan.com
        domain = kwargs.pop('domain', '')
        # wrap in list() so the result survives repeated iteration under Python 3
        self.allowed_domains = list(filter(None, domain.split(',')))
        super(MarrigeSpider, self).__init__(*args, **kwargs)

    def parse_item(self, response):
        item = YouyuanwangItem()
        student_list = response.xpath('//div[@class="student"]/ul/li')
        for li in student_list:
            item['name'] = li.xpath('./dl/dd/a[1]/strong/text()').extract_first()
            item['desc'] = li.xpath('./dl/dd/font//text()').extract()
            item['img'] = li.xpath('./dl/dt/a/img/@src').extract_first()
            yield item
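
The spider above assumes a YouyuanwangItem with the three fields it fills in. The original items.py is not shown here, but a minimal version would look roughly like this (a sketch; only the field names are taken from the spider):

import scrapy


class YouyuanwangItem(scrapy.Item):
    name = scrapy.Field()  # nickname
    desc = scrapy.Field()  # profile description lines
    img = scrapy.Field()   # avatar image URL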

Next comes settings.py, again copied from the official example:

# Scrapy settings for youyuanwang project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
#     http://doc.scrapy.org/topics/settings.html
#
SPIDER_MODULES = ['youyuanwang.spiders']
NEWSPIDER_MODULE = 'youyuanwang.spiders'

USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36'

DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# keep the Redis queue and dupefilter when the spider closes, so the crawl can be paused and resumed
SCHEDULER_PERSIST = True
#SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderPriorityQueue"
#SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderQueue"
#SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderStack"

ITEM_PIPELINES = {
    # 'youyuanwang.pipelines.YouyuanwangPipeline': 300,
    'scrapy_redis.pipelines.RedisPipeline': 400,
}

LOG_LEVEL = 'DEBUG'

REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379
# REDIS_PASS = 'root'
SPIDER_MIDDLEWARES = { 'scrapy.spidermiddlewares.offsite.OffsiteMiddleware': None, }
# Introduce an artificial delay to stay polite; with several workers running in
# parallel the overall crawl is still fast.
DOWNLOAD_DELAY = 1


To run it, first push a start URL into Redis. The key must match the spider's redis_key ('guoshaosong' in this example):
lpush guoshaosong http://www.youyuan.com/find/xian/mm18-0/advance-0-0-0-0-0-0-0/p1/
Then run the spider from PyCharm or a terminal:
scrapy runspider <path to the spider file>   (or scrapy crawl marrige from inside the project)

You can stop the spider partway through and restart it to check that it really resumes: with SCHEDULER_PERSIST = True the request queue (marrige:requests) and the fingerprint set (marrige:dupefilter) stay in Redis, so a restarted worker carries on where the last one stopped.

Converting an ordinary Spider

The process is exactly the same; copy the official example whenever you are unsure. This second example is just to show the pattern once more.
Goal: crawl content from Sina News (news.sina.com.cn).

import scrapy
from news.items import NewsItem
# Step 1: import the required module
from scrapy_redis.spiders import RedisSpider


# Step 2: change the base class
class SinaNewsSpider(RedisSpider):
    name = 'sina_news'

    # Step 3: comment out the allowed domains and start URLs
    # allowed_domains = ['sina.com.cn']
    # start_urls = ['http://news.sina.com.cn/guide/']
    # Step 4: set the redis_key
    redis_key = 'myspider:start_urls'

    # Step 5: set the allowed domains dynamically via __init__
    def __init__(self, *args, **kwargs):
        # Dynamically define the allowed domains list.
        domain = kwargs.pop('domain', '')
        # wrap in list() so the result survives repeated iteration under Python 3
        self.allowed_domains = list(filter(None, domain.split(',')))
        super(SinaNewsSpider, self).__init__(*args, **kwargs)


    # Step 6: update the settings file (in PyCharm, Ctrl+Shift+Alt+J selects all occurrences)
    def parse(self, response):
        # URLs and titles of all the top-level categories
        parentUrls = response.xpath('//div[@id="tab01"]/div/h3/a/@href').extract()
        parentTitle = response.xpath('//div[@id="tab01"]/div/h3/a/text()').extract()

        # URLs and titles of all the sub-categories
        subUrls = response.xpath('//div[@id="tab01"]/div/ul/li/a/@href').extract()
        subTitle = response.xpath('//div[@id="tab01"]/div/ul/li/a/text()').extract()

        # match each sub-category to the top-level category it belongs to
        for i in range(0, len(parentUrls)):
            for j in range(0, len(subUrls)):
                item = NewsItem()
                item['headline'] = parentTitle[i]
                item['headline_url'] = parentUrls[i]
                # if this sub-category URL falls under the current top-level category, fill the item and follow the link
                if subUrls[j].startswith(item['headline_url']):
                    item['subtitle'] = subTitle[j]
                    item['subtitle_url'] = subUrls[j]
                    yield scrapy.Request(url=item['subtitle_url'], callback=self.subtitle_parse, meta={'meta_1': item})

    def subtitle_parse(self, response):
        item = NewsItem()
        meta_1 = response.meta['meta_1']
        item['content'] = response.xpath('//title/text()').extract_first()
        item['headline'] = meta_1['headline']
        item['headline_url'] = meta_1['headline_url']
        item['subtitle'] = meta_1['subtitle']
        item['subtitle_url'] = meta_1['subtitle_url']
        yield item
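
As with the first example, the original items.py is not shown; a minimal NewsItem matching the fields used above would look roughly like this (a sketch; field names taken from the spider):

import scrapy


class NewsItem(scrapy.Item):
    headline = scrapy.Field()      # top-level category title
    headline_url = scrapy.Field()  # top-level category URL
    subtitle = scrapy.Field()      # sub-category title
    subtitle_url = scrapy.Field()  # sub-category URL
    content = scrapy.Field()       # page content (here just the <title> text)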

The settings.py:

# Scrapy settings for news project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
#     http://doc.scrapy.org/topics/settings.html
#
SPIDER_MODULES = ['news.spiders']
NEWSPIDER_MODULE = 'news.spiders'

USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36'

DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
SCHEDULER_PERSIST = True
#SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderPriorityQueue"
#SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderQueue"
#SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderStack"

ITEM_PIPELINES = {
    # 'news.pipelines.ExamplePipeline': 300,
    'scrapy_redis.pipelines.RedisPipeline': 400,
}

# Redis connection settings
REDIS_HOST = "127.0.0.1"
REDIS_PORT = 6379

LOG_LEVEL = 'DEBUG'
SPIDER_MIDDLEWARES = { 'scrapy.spidermiddlewares.offsite.OffsiteMiddleware': None, }
# Introduce an artificial delay to stay polite; with several workers running in
# parallel the overall crawl is still fast.
DOWNLOAD_DELAY = 1
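
This spider listens on redis_key = 'myspider:start_urls', so before running it push the Sina start URL to that key. A redis-py sketch (the redis-cli command lpush myspider:start_urls http://news.sina.com.cn/guide/ does the same thing):

import redis

r = redis.Redis(host='127.0.0.1', port=6379)
r.lpush('myspider:start_urls', 'http://news.sina.com.cn/guide/')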


That is basically it. I also looked into deploying and managing distributed crawlers, mainly with scrapyd and Gerapy, but I am on Windows 10 Education and the Docker installation kept failing, so I stopped there. There are plenty of related blog posts on CSDN if you want to dig deeper.
