Data Analysis Project (Part 1): The Crawler

By 楊昊啊, published 2018-11-30

I want to do a data analysis project, and a project needs data. Having just learned web scraping a few days ago, I decided to collect the data myself by crawling 前程無憂 (51job) with the Scrapy framework. The code is below.
First, create the project: scrapy startproject <project_name>
In my case: scrapy startproject job1
Enter the project: cd job1
Inside the project directory, generate a spider: scrapy genspider <spider_name> <domain>
scrapy genspider 51job 51job.com
The resulting directory structure:
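This is the standard layout produced by startproject and genspider; the file names match the listings below:

job1/
├── scrapy.cfg
└── job1/
    ├── __init__.py
    ├── items.py
    ├── middlewares.py
    ├── pipelines.py
    ├── settings.py
    └── spiders/
        ├── __init__.py
        └── a51job.py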
Now for the code.
a51job.py (genspider prefixes the module name with an a when the spider name starts with a digit, hence a51job.py)

# -*- coding: utf-8 -*-
import scrapy

from ..items import Job1Item


class A51jobSpider(scrapy.Spider):
    name = '51job'
    allowed_domains = ['51job.com']
    def __init__(self, place='全國', kw='資料分析', **kwargs):
        super().__init__(**kwargs)
        self.place = place
        self.kw = kw
        self.place_code = {
            # '杭州': '080200',
            # '上海': '020000',
            '全國':'000000',
        }
        self.start_urls = [
            'https://search.51job.com/list/{place_code},000000,0000,00,9,99,{kw},2,1.html?lang=c&stype=1&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare='.format(
                place_code=self.place_code[self.place], kw=self.kw)]

    # start_urls = ['http://51job.com/']

    def parse(self, response):
        # save the raw page locally for debugging; the file name is the tail of the URL path
        with open(response.url.split('?')[0][-7:], 'wb') as f:
            f.write(response.body)
        jobs = response.xpath('//*[@id="resultList"]/div[@class="el"]')
        for job in jobs:
            # item = {}
            item = Job1Item()
            item['name'] = job.xpath('string(.//p[contains(@class,"t1")])').get().strip()
            item['company'] = job.xpath('string(.//span[@class="t2"])').get().strip()
            item['place'] = job.xpath('string(.//span[@class="t3"])').get().strip()
            item['salary'] = job.xpath('string(.//span[@class="t4"])').get().strip()
            item['post_time'] = job.xpath('string(.//span[@class="t5"])').get().strip()

            yield item
            # print('yielded an item', item)

        next_page = response.xpath('//a[text()="下一頁"]')
        if next_page:
            # href of the "next page" link; response.follow resolves it to an absolute URL
            next_page_url = next_page.xpath('.//@href').get()
            # issue the request; the callback defaults to self.parse
            yield response.follow(next_page_url)
            # or: yield scrapy.Request(response.urljoin(next_page_url), callback=self.parse)

    def get_job_info(self, response):
        # placeholder for parsing the job detail page (not implemented)
        pass
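
Before running the full crawl, the XPath expressions can be checked interactively with scrapy shell. A sketch, using a trimmed version of the start URL the spider builds (the site's markup may of course have changed since then):

scrapy shell "https://search.51job.com/list/000000,000000,0000,00,9,99,資料分析,2,1.html"
>>> jobs = response.xpath('//*[@id="resultList"]/div[@class="el"]')
>>> jobs[0].xpath('string(.//span[@class="t4"])').get().strip()   # salary of the first listing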

items.py

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class Job1Item(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    name = scrapy.Field()
    company = scrapy.Field()
    place = scrapy.Field()
    salary = scrapy.Field()
    post_time = scrapy.Field()

middlewares.py (left unchanged from the Scrapy template)

# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals


class Job1SpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn’t have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class Job1DownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.

        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.

        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.

        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)

pipelines.py

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import time
import pymongo
import pymysql


# The three pipelines below are only demos for watching an item travel through
# the pipeline chain; they are not enabled in ITEM_PIPELINES.
class Job1Pipeline(object):
    def open_spider(self, spider):
        print('opening spider', spider)
        self.conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', db='job', password='1234', charset='utf8')
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        time.sleep(0.1)
        print('item passed through pipeline 1')

        print('name' in item)
        item = dict(item)
        print(item)
        return item

    def close_spider(self, spider):
        self.cursor.close()
        self.conn.close()
        print('closing spider', spider)

class Job1Pipeline2(object):
    def process_item(self, item, spider):
        time.sleep(1)
        print('item passed through pipeline 2')
        # returning None means the next pipeline receives None;
        # normally you would return the item or raise DropItem
        return None

class Job1Pipeline3(object):
    def process_item(self, item, spider):
        print('3.', item)

        print('item passed through pipeline 3')
        return item

class Job1MySQLPipeline(object):
    def open_spider(self,spider):
        self.conn = pymysql.connect(host='127.0.0.1', port=3306, user='root',db='job', password='****', charset='utf8')
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        sql = 'insert into jobs (name,company,place,salary,post_time) VALUES (%s,%s,%s,%s,%s);'
        self.cursor.execute(sql, (item['name'], item['company'], item['place'], item['salary'], item['post_time']))
        self.conn.commit()
        return item


    def close_spider(self, spider):
        """
        關閉資料庫連線
        """
        self.cursor.close()
        self.conn.close()


class Job1MongoDBPipeline(object):
    def open_spider(self, spider):
        # create the database connection
        self.client = pymongo.MongoClient(host='127.0.0.1', port=27017)
        self.db = self.client['job']
        self.coll = self.db['job_collection']

    def process_item(self, item, spider):
        """
        Insert the item into the database.
        """
        # insert() is deprecated in newer pymongo, so use insert_one
        self.coll.insert_one(dict(item))
        return item

    def close_spider(self, spider):
        """
        關閉資料庫連線
        """
        self.client.close()
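
Because Job1MongoDBPipeline is enabled in ITEM_PIPELINES (see settings.py below), the items also land in MongoDB. A quick way to check, assuming the same host, database and collection names as above (count_documents needs pymongo 3.7+):

import pymongo

client = pymongo.MongoClient(host='127.0.0.1', port=27017)
coll = client['job']['job_collection']
print(coll.count_documents({}))   # how many jobs were inserted
print(coll.find_one())            # inspect one document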

settings.py

# -*- coding: utf-8 -*-

# Scrapy settings for job1 project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'job1'

SPIDER_MODULES = ['job1.spiders']
NEWSPIDER_MODULE = 'job1.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'job1 (+http://www.yourdomain.com)'
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 0.5
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'job1.middlewares.Job1SpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'job1.middlewares.Job1DownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
   'job1.pipelines.Job1MySQLPipeline': 301,
   'job1.pipelines.Job1MongoDBPipeline': 302,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

You also need to create a new .py file at the same level as the project (next to scrapy.cfg), with the following code:

from scrapy import cmdline
cmdline.execute('scrapy crawl 51job'.split())
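
cmdline.execute simply runs the same command you would type in a terminal, which makes it easy to start or debug the spider from an IDE. The spider arguments defined in a51job.py can be passed the same way (a sketch):

from scrapy import cmdline
# equivalent to typing: scrapy crawl 51job -a kw=資料分析 --nolog
cmdline.execute('scrapy crawl 51job -a kw=資料分析 --nolog'.split())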

I connected to a MySQL database.
Create the database: create database <database_name>;
create database job;
Switch to it: use job;
Create the table: create table jobs(id int primary key auto_increment,name varchar(50),company varchar(50),place varchar(50),salary varchar(50),post_time varchar(50));
Check the table structure: desc jobs; (output below)
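On MySQL 5.x the structure should look roughly like this (int is displayed as int(11)):

+-----------+-------------+------+-----+---------+----------------+
| Field     | Type        | Null | Key | Default | Extra          |
+-----------+-------------+------+-----+---------+----------------+
| id        | int(11)     | NO   | PRI | NULL    | auto_increment |
| name      | varchar(50) | YES  |     | NULL    |                |
| company   | varchar(50) | YES  |     | NULL    |                |
| place     | varchar(50) | YES  |     | NULL    |                |
| salary    | varchar(50) | YES  |     | NULL    |                |
| post_time | varchar(50) | YES  |     | NULL    |                |
+-----------+-------------+------+-----+---------+----------------+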
Run the crawler: scrapy crawl 51job --nolog
Check the data in the database: select * from jobs;
