Scrapy框架-Spider和CrawlSpider的區別

Bricker666發表於2019-02-15

1.目標

http://wz.sun0769.com/index.php/question/questionType?type=4&page=

爬取每個頁面連結的內部內容和投訴資訊

2.方法1:通過Spider爬取

# -*- coding: utf-8 -*-
import scrapy
from dongguanSpider.items import DongguanItem

class SunSpider(scrapy.Spider):
    """Crawl the Sun0769 complaint board with a plain ``scrapy.Spider``.

    Parses each listing page, yields one request per post detail link,
    and manually issues the request for the next listing page by
    incrementing a page offset.
    """
    name = 'sun'
    allowed_domains = ['wz.sun0769.com']
    url = 'http://wz.sun0769.com/index.php/question/questionType?type=4&page='
    offset = 0

    start_urls = [url + str(offset)]

    def parse(self, response):
        """Parse one listing page: follow every post link, then page forward."""
        # All post links on the current listing page.
        links = response.xpath('//div[@class="greyframe"]/table//td/a[@class="news14"]/@href').extract()
        # Each post detail page is handled by parse_item.
        for link in links:
            yield scrapy.Request(link, callback=self.parse_item)

        # Keep paging until the terminal offset; the site lists 30 posts
        # per page, hence the step of 30.
        if self.offset <= 71160:
            self.offset += 30
            yield scrapy.Request(self.url + str(self.offset), callback=self.parse)

    def parse_item(self, response):
        """Extract title, post number and content from a post detail page.

        Yields:
            DongguanItem: one populated item per post.
        """
        item = DongguanItem()

        # The labels look like "label:value"; keep the part after the colon.
        # NOTE(review): confirm whether the page uses the full-width colon
        # ':' rather than ASCII ':' — the split separator must match.
        item['title'] = response.xpath('//div[@class="wzy1"]/table[1]//tr/td[2]/span[1]/text()').extract()[0].split(':')[-1]
        item['url'] = response.url
        item['number'] = response.xpath('//div[@class="wzy1"]/table[1]//tr/td[2]/span[2]/text()').extract()[0].split(':')[-1]

        # Posts with an attached picture store their content differently.
        content_pic = response.xpath('//div[@class="textpic"]/img/@src').extract()

        if not content_pic:
            content_no_pic = response.xpath('//div[@class="wzy1"]/table[2]//tr/td/text()').extract()[0]
            # Strip the non-breaking spaces ("\xa0") embedded in the page text.
            item['content'] = "".join(content_no_pic).replace("\xa0", "")
        else:
            item['content'] = "".join(content_pic[0]).replace("\xa0", "")

        yield item

3. 通過CrawlSpider爬取

# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from dongguan.items import DongguanItem


class SunSpider(CrawlSpider):
    """Crawl the Sun0769 complaint board using ``CrawlSpider`` rules.

    One rule follows pagination links, the other extracts post detail
    pages and routes them to ``parseDongguan``.
    """
    name = 'sun'
    allowed_domains = ['wz.sun0769.com']
    start_urls = ['http://wz.sun0769.com/index.php/question/questionType?type=4&page=30']

    rules = [
        # Follow pagination links (no callback: links are just followed).
        Rule(LinkExtractor(allow=(r'type=4&page=\d+',))),
        # Post detail pages, e.g. /html/question/201902/123456.shtml.
        Rule(LinkExtractor(allow=(r'/html/question/\d+/\d+\.shtml',)), callback='parseDongguan'),
    ]

    def parseDongguan(self, response):
        """Extract title, post number and content from a post detail page.

        Yields:
            DongguanItem: one populated item per post.
        """
        item = DongguanItem()

        # The labels look like "label:value"; keep the part after the colon.
        # NOTE(review): confirm whether the page uses the full-width colon
        # ':' rather than ASCII ':' — the split separator must match.
        item['title'] = response.xpath('//div[@class="wzy1"]/table[1]//tr/td[2]/span[1]/text()').extract()[0].split(':')[-1]
        item['url'] = response.url
        item['number'] = response.xpath('//div[@class="wzy1"]/table[1]//tr/td[2]/span[2]/text()').extract()[0].split(':')[-1]

        # Posts with an attached picture store their content differently.
        content_pic = response.xpath('//div[@class="textpic"]/img/@src').extract()

        if not content_pic:
            content_no_pic = response.xpath('//div[@class="wzy1"]/table[2]//tr/td/text()').extract()[0]
            # Strip the non-breaking spaces ("\xa0") embedded in the page text.
            item['content'] = "".join(content_no_pic).replace("\xa0", "")
        else:
            item['content'] = "".join(content_pic[0]).replace("\xa0", "")

        yield item

相關文章