頂點小說進階(多程序+協程)
建議:
看之前可以先看我之前釋出的文章(非同步最佳化與資料入庫: 頂點小說爬蟲進階實戰)
這篇文章基於上篇文章:進行了多程序處理,大大加快了爬取速度
案例:頂點小說完善(多程序)
最佳化思路:
- 導包:from multiprocessing import Pool
- 對於每一頁的所有小說採用一個程序,建立程序池,for迴圈中向程序池新增任務(將對於每一頁所有小說的處理封裝成一個方法,作為任務新增到程序池)
import asyncio
import logging
import time
import requests
from lxml import etree
import aiohttp
import aiomysql
from aiohttp import ContentTypeError
from multiprocessing import Pool
CONCURRENCY = 4
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s : %(message)s')
class Spider(object):
def __init__(self):
# 方便設定頭部資訊、代理IP、cookie資訊等
self.session = None
# 設定協程數量
self.semaphore = asyncio.Semaphore(CONCURRENCY)
# 限制協程的併發數:
# 如果併發數沒有達到限制: 那麼async with semaphore會瞬間執行完成,進入裡面的正式程式碼中
# 如果併發數已經達到了限制,那麼其他的協程物件會阻塞在asyn with semaphore這個地方,直到正在執行的某個協程物件完成了,退出了,才會放行一個新的協程物件去替換掉這個已經完成的協程物件
# 初始化資料庫連線池
async def init_pool(self):
self.pool = await aiomysql.create_pool(
host="127.0.0.1",
port=3306,
user="root",
password="123456",
db=f"dingdian",
autocommit=True # Ensure autocommit is set to True for aiomysql
)
# 在 aiomysql.create_pool 方法中,不需要顯式傳遞 loop 引數。aiomysql 會自動使用當前的事件迴圈(即預設的全域性事件迴圈)。
# 關閉資料庫連線池
async def close_pool(self):
if self.pool:
self.pool.close()
await self.pool.wait_closed()
# 獲取url原始碼
async def scrape_api(self, url):
# 設定協程數量
async with self.semaphore:
logging.info(f"scraping {url}")
try:
async with self.session.get(url) as response:
# 控制爬取(或請求)的速率,以避免對目標伺服器造成過多的負荷或請求頻率過高而被封禁或限制訪問。
await asyncio.sleep(1)
# 在非同步環境中,可能需要使用 response.content.read() 或 await response.text() 來獲取文字內容。
return await response.text()
except ContentTypeError as e: # aiohttp 的 ContentTypeError 異常: 請求內容型別錯誤 或者 響應內容型別錯誤
# exc_info=True 引數將導致 logging 模組記錄完整的異常資訊,包括棧跟蹤,這對於除錯非常有用。
logging.error(f'error occurred while scraping {url}', exc_info=True)
# 獲取小說分類url
async def get_type(self):
url = "https://www.cdbxs.com/sort/"
source = await self.scrape_api(url)
href_lists = etree.HTML(source).xpath('//ul[@class="nav"]/li/a/@href')[2:-4]
type_lists = []
for href in href_lists:
type_lists.append(f"{url}{href.split('/')[2]}/1/")
# print(type_lists)
return type_lists
# 獲取最大頁
async def get_max_page(self, first_page_url):
source = await self.scrape_api(first_page_url)
# print(source)
max_page = etree.HTML(source).xpath('//a[13]/text()')
return max_page
# 獲取小說列表頁資訊
async def get_book_info(self, every_page_url):
source = await self.scrape_api(every_page_url)
book_lists = []
lis = etree.HTML(source).xpath("//ul[@class='txt-list txt-list-row5']/li")
for li in lis:
book_id_url = li.xpath("span[@class='s2']/a/@href")[0]
book_id = book_id_url.split('/')[3]
# 書名
book_name = li.xpath("span[@class='s2']/a/text()")[0]
# 最新章節
new_chapter = li.xpath("span[@class='s3']/a/text()")[0]
# 作者
author = li.xpath("span[@class='s4']/text()")[0]
# 更新時間
update_time = li.xpath("span[@class='s5']/text()")[0]
source = await self.scrape_api(f"https://www.cdbxs.com{book_id_url}")
# 字數
font_num = etree.HTML(source).xpath("//p[6]/span/text()")[0]
# 摘要
summary = etree.HTML(source).xpath("//div[@class='desc xs-hidden']/text()")[0]
# 以元組新增至 book_lists
# print((book_id, book_name, new_chapter, author, update_time, font_num, summary))
book_lists.append((book_id, book_name, new_chapter, author, update_time, font_num, summary))
return book_lists
# 獲取章節urls
async def get_chapter_urls(self, chapter_list_url):
source = await self.scrape_api(chapter_list_url)
# 章節url
chapter_urls = map(lambda x: "https://www.cdbxs.com" + x, etree.HTML(source).xpath(
"//div[@class='section-box'][2]/ul[@class='section-list fix']/li/a/@href | //div[@class='section-box'][1]/ul[@class='section-list fix']/li/a/@href"))
return chapter_urls
# 獲取章節詳情資訊
async def get_chapter_info(self, chapter_url):
source = await self.scrape_api(chapter_url)
# 標題
title = etree.HTML(source).xpath("//h1[@class='title']/text()")
# 正文
content = ''.join(etree.HTML(source).xpath("//div[@id='nb_content']/dd//text()"))
if title:
return f'\'{title[0]}\'', f'\'{content}\''
else:
return '', f'\'{content}\''
# 入庫
async def save_to_mysql(self, table_name, table_column_str, table_info_str):
async with self.pool.acquire() as conn:
async with conn.cursor() as cursor:
sql = f'insert into {table_name}({table_column_str}) values{table_info_str}'
# 執行SQL語句
await cursor.execute(sql)
await conn.commit()
async def main(self):
# headers
global pool
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0"
}
# 建立非同步請求需要的session(主要加header頭資訊以及代理,cookie等頭資訊)
self.session = aiohttp.ClientSession(headers=headers)
# 獲取小說分類url
type_lists = await self.get_type()
# 分類url預設為第一頁
for first_page_url in type_lists:
# 獲取帶分類的url的前半截
type_url = first_page_url.split('1')[0]
# 獲取此分類下最大頁
max_page = await self.get_max_page(first_page_url)
# 生成此分類下每一頁url
for every_page in range(1, int(max_page[0]) + 1):
every_page_url = f"{type_url}{every_page}/"
# 獲取小說列表頁資訊
book_info_lists = await self.get_book_info(every_page_url)
# 建立程序池
pool = Pool(16)
for book_info in book_info_lists:
# 多程序抓取每本小說
pool.apply_async(await self.run(book_info))
# 關閉程序池,即停止接受新的任務。
pool.close()
# 等待所有的子程序執行結束。它會阻塞主程序,直到程序池中所有的任務都被執行完畢,然後才會繼續執行主程序後面的程式碼。
# 呼叫 join() 方法之前,應該先呼叫 close() 方法來確保不會再有新的任務被提交進來。
pool.join()
# 關閉連線池
self.close_pool()
# 關閉連線
await self.session.close()
# run方法: 抓取每一本小說的所有章節
async def run(self, book_info):
print(f"爬取小說:{book_info[1]}...")
# 初始化資料庫連線池
await self.init_pool()
# 入庫小說資訊
await self.save_to_mysql('books',
'book_id, book_name, new_chapter, author, update_time, font_num, summary',
book_info)
# 獲取章節urls
book_id = book_info[0]
chapter_urls = await self.get_chapter_urls(f"https://www.cdbxs.com/booklist/b/{book_id}/1")
# 多協程抓取小說各個章節
# 生成scrape_detail任務列表
scrape_detail_tasks = [asyncio.ensure_future(self.get_chapter_info(chapter_url)) for chapter_url in
chapter_urls]
# 併發執行任務,獲取結果
chapter_details = list(
await asyncio.gather(*scrape_detail_tasks)) # await asyncio.gather(*scrape_detail_tasks生成元組
# 入庫
# 1.新增book_id 到 chapter_detail
for i in range(len(chapter_details)):
chapter_detail = list(chapter_details[i])
chapter_detail.append(book_id)
chapter_detail = tuple(chapter_detail)
chapter_details[i] = chapter_detail
# 2.儲存至資料庫
[await self.save_to_mysql('chapters', 'chapter_name,chapter_content, bid',
chapter_detail) for chapter_detail in chapter_details]
if __name__ == '__main__':
# 開始時間
start_time = time.time()
# 初始化Spider
spider = Spider()
# 建立事件迴圈池
loop = asyncio.get_event_loop()
# 註冊
loop.run_until_complete(spider.main())
# 結束時間
end_time = time.time()
logging.info(f'total time: {end_time - start_time}')
後續釋出爬蟲更多精緻內容(按某培訓機構爬蟲課程順序釋出,歡迎關注後續釋出)