前言
很多免費的資源只能線上看,但不提供下載。今天我們以小說為例,教你如何把網際網路上只能看、不能下載的內容下載下來。
知識點:
- requests
- css選擇器
- 全站小說爬取思路
開發環境:
- 版本:anaconda 5.2.0(python 3.6.5)
- 編輯器:pycharm 社群版
程式碼
匯入工具
# Two import statements were fused onto one line (a SyntaxError);
# one import per line, as PEP 8 requires.
import requests  # third-party HTTP client
import parsel    # third-party selector library (css / xpath / re)
請求頭
# HTTP request headers sent with every request.
# NOTE(review): 'gao fu shui' is not a real browser User-Agent string;
# some servers may reject or throttle it — consider a genuine UA.
headers = {
    'User-Agent': 'gao fu shui',
}
請求資料
def download_one_chapter(chapter_url, book_name):
    """Download one chapter page and append its text to '<book_name>.txt'.

    The `def` header was lost in the original paste — these statements
    reference `chapter_url` and `book_name`, and the code below calls
    `download_one_chapter(chapter_url, book_name)`, so the wrapper is
    restored here.

    Parameters:
        chapter_url: absolute URL of one chapter page.
        book_name:   base name of the output text file (no extension).
    """
    response = requests.get(chapter_url, headers=headers)
    # The server may not declare a charset; apparent_encoding is requests'
    # best guess from the response body (right in ~99% of cases).
    response.encoding = response.apparent_encoding
    html = response.text

    # Wrap the HTML so it can be queried with css / xpath / re selectors.
    selector = parsel.Selector(html)
    # ::text extracts node text; .get() returns the first match (or None),
    # .getall() returns every match as a list.
    title = selector.css('.reader h1::text').get()
    paragraphs = selector.css('.showtxt::text').getall()

    # Strip surrounding whitespace from every extracted text fragment.
    lines = [p.strip() for p in paragraphs]
    print(title)  # progress indicator: one line per chapter
    text = '\n'.join(lines)

    # NOTE(review): title is None if the selector misses — verify the page
    # layout before relying on this. Mode 'a' appends so successive
    # chapters accumulate in one file; 'with' guarantees the file is
    # closed even if a write fails (the original leaked it on error).
    with open(book_name + '.txt', mode='a', encoding='utf-8') as file:
        file.write(title)
        file.write('\n')
        file.write(text)
        file.write('\n')
獲取所有章節的下載地址
# Example single-chapter calls:
# download_one_chapter('http://www.shuquge.com/txt/8659/2324752.html')
# download_one_chapter('http://www.shuquge.com/txt/8659/2324753.html')
# download_one_chapter('http://www.shuquge.com/txt/8659/2324754.html')


def download_one_book(index_url):
    """Download every chapter of the book whose index page is *index_url*.

    The book title is read from the page's <h2>; each chapter is appended
    to '<title>.txt' by download_one_chapter().
    """
    # Local stdlib import: resolve chapter hrefs against the index URL.
    import urllib.parse

    index_response = requests.get(index_url, headers=headers)
    index_response.encoding = index_response.apparent_encoding
    sel = parsel.Selector(index_response.text)

    book_name = sel.css('h2::text').get()
    # Every chapter link on the index page.
    urls = sel.css('.listmain dl dd a::attr(href)').getall()

    # The site lists the 12 newest chapters first; skip them so the book
    # is written in reading order.
    for url in urls[12:]:
        # urljoin handles relative *and* absolute hrefs correctly,
        # unlike the original fragile index_url[:-10] prefix trick,
        # and gives the same result for the relative hrefs this site uses.
        chapter_url = urllib.parse.urljoin(index_url, url)
        print(chapter_url)
        download_one_chapter(chapter_url, book_name)


# Example whole-book calls:
# download_one_book('http://www.shuquge.com/txt/8659/index.html')
# download_one_book('http://www.shuquge.com/txt/5809/index.html')
# download_one_book('http://www.shuquge.com/txt/63542/index.html')

"""Crawl the category listing pages and download every book found."""
# '<cate>_<page>.html' selects the category and the page number; both
# loops simply generate those two URL components.
for cate in ['1', '2', '4']:
    for page in range(1, 101):
        cate_url = ('http://www.shuquge.com/category/'
                    + cate + '_' + str(page) + '.html')
        cate_response = requests.get(cate_url, headers=headers)
        cate_response.encoding = cate_response.apparent_encoding
        sel = parsel.Selector(cate_response.text)
        # Book index-page links inside the category listing.
        urls = sel.css('.l.bd > ul > li > span.s2 > a::attr(href)').getall()
        for url in urls:
            print(url)
            download_one_book(url)