Python爬蟲遞迴呼叫爬取動漫美女圖片
效果
程式碼,碼雲地址
"""Recursive image crawler for pic.netbian.com (4K anime category).

Starting from a seed page it harvests in-site hyperlinks (restricted to a few
category keywords), recursively visits them until ``globals_max_href`` links
have been collected, and saves every ``.jpg`` it finds into one directory per
page title, appending the image's pixel size to each file name.
"""
import datetime
import os
import random
from io import BytesIO

import requests
from lxml import etree
from PIL import Image

# NOTE(review): the original script also did `import urllib`, `import image`
# and `from io import StringIO`.  None of the three is used anywhere, and
# `image` is not a standard module — that import aborted the whole script with
# ImportError — so all three were removed.

# Seed site kept from the original script; unused by the code below but
# retained for backward compatibility with anything that reads it.
url_src = 'https://www.nvshens.org'

# Pool of desktop browser User-Agent strings; one is picked at random so the
# crawler does not always present the same UA.
user_agent = [
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
    "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
]

# Wall-clock start time of the crawl (recorded but never printed — kept as-is).
start = datetime.datetime.now()

headers = {
    "User-Agent": random.choice(user_agent),
    "Referer": "http://pic.netbian.com/4kdongman/",
}

# Seed page the crawl starts from.
url = 'http://pic.netbian.com/4kdongman/'

# Image extensions the author intended to accept.  Unused by the code below
# (only '.jpg' is actually checked).  BUG FIX: original had the typo 'git'
# where 'gif' was clearly intended.
suffix = ['jpg', 'png', 'gif']

# Base used to turn site-relative href/src attributes into absolute URLs.
src_url = 'http://pic.netbian.com'

globals_href = set()      # every in-site hyperlink discovered so far
globals_img_urls = set()  # every image URL saved so far
globals_max_href = 100    # hard cap on the number of hyperlinks collected

# Destination directory for downloads.  NOTE(review): `dir` shadows the
# builtin of the same name; the name is kept only for backward compatibility.
dir = r'X:\Users\SwordArtOnline\Desktop\爬蟲\dongman\\'


def get_html(url):
    """GET *url* and return it parsed as an lxml HTML tree.

    Returns None on any non-200 response so callers can skip dead pages.
    """
    response = requests.get(url, headers=headers)
    response.encoding = 'gbk'  # the target site serves gbk-encoded pages
    print("正在請求的url:" + url)
    if response.status_code == 200:
        return etree.HTML(response.text)
    return None


def get_page_name(html):
    """Return the page's <title> text, or None when *html* is None or the
    page has no title (the original raised IndexError in that case)."""
    if html is not None:
        page_name = html.xpath('//title/text()')
        if page_name:
            return page_name[0]
    return None


def get_this_page_all_href(html):
    """Recursively harvest in-site hyperlinks reachable from *html*.

    Only site-relative hrefs containing one of the category keywords are
    kept; harvesting stops once globals_max_href links have been collected.
    """
    # BUG FIX: a failed request yields html=None; the original crashed with
    # AttributeError here instead of skipping the page.
    if html is None:
        return
    href_list = html.xpath('//child::*/@href')
    need_href = set()
    for i in href_list:
        if len(i) > 3 and i[0] == '/' and ('youxi' in i or 'dongman' in i or 'tupian' in i):
            # Request at most globals_max_href hyperlinks in total.
            if len(globals_href) <= globals_max_href and src_url + i not in globals_href:
                href = src_url + i
                need_href.add(href)
                globals_href.add(href)
    print(f'目前叢集有{len(globals_href)}個有效超連結')
    print(f'本頁面有{len(need_href)}個有效超連結')
    print(need_href)
    # Depth-first recursion; bounded because globals_max_href caps the total
    # number of links ever added to need_href across all calls.
    for i in need_href:
        get_this_page_all_href(get_html(i))


def find_this_page_imgs_and_save_img(url):
    """Download every .jpg on the page at *url* that carries both src and alt,
    after harvesting the page's hyperlinks into the global crawl frontier."""
    html = get_html(url)
    if html is None:
        return
    page_name = get_page_name(html)  # page title, used as the folder name
    if page_name is None:
        return
    get_this_page_all_href(html)  # collect all hyperlinks on this page
    src_list = html.xpath('//child::*[@src and @alt]/@src')
    alt_list = html.xpath('//child::*[@src and @alt]/@alt')
    img_info = dict(zip(src_list, alt_list))  # src -> alt (alt becomes file name)
    print('-' * 30)
    print(img_info)
    print('頁面標題:' + page_name)
    print('-' * 30)
    valid_img_urls = set()
    for i in img_info.keys():
        # BUG FIX: the original tested membership against globals_href (the
        # *hyperlink* set), so already-downloaded images were never skipped;
        # the image-URL set is the correct dedup target.
        if len(i) > 3 and i.endswith('.jpg') and src_url + i not in globals_img_urls:
            img_url = src_url + i
            valid_img_urls.add(img_url)
            globals_img_urls.add(img_url)
            save_img(img_url, page_name, img_info[i])
    print(f'本頁有{len(valid_img_urls)}個有效圖片')
    print(f'目前叢集有{len(globals_img_urls)}個有效圖片')


def save_img(img_url, page_name, img_name):
    """Download *img_url* into dir/page_name/, appending the image's pixel
    dimensions to the file name.

    NOTE(review): page_name comes from an HTML <title> and may contain
    characters that are illegal in Windows paths — TODO confirm/sanitize.
    """
    response = requests.get(img_url, headers=headers)
    if response.status_code != 200:  # robustness: skip failed downloads
        return
    img_content = response.content
    save_path = dir + page_name + '\\'
    # Open in memory only to read the dimensions; the raw bytes are written.
    img = Image.open(BytesIO(img_content))
    w = img.size[0]
    h = img.size[1]
    if not os.path.exists(save_path):
        os.makedirs(save_path)
        print('成功建立目錄:' + save_path)
    img_name = save_path + img_name + f'_本圖片尺寸_{w}x{h}.jpg'
    with open(img_name, 'wb') as f:
        f.write(img_content)
    print(f'成功儲存第{len(globals_img_urls)}圖片:' + img_name)


if __name__ == '__main__':
    find_this_page_imgs_and_save_img(url)
    # BUG FIX: the original iterated globals_href directly while the call
    # below kept adding links to it, raising "RuntimeError: Set changed size
    # during iteration"; iterate over a snapshot instead.
    for i in list(globals_href):
        find_this_page_imgs_and_save_img(i)
    print(f'叢集總共{len(globals_href)}有效超連結')
相關文章
- 爬蟲---xpath解析(爬取美女圖片)爬蟲
- python 爬蟲 下載百度美女圖片Python爬蟲
- Python爬蟲—爬取某網站圖片Python爬蟲網站
- python爬蟲---網頁爬蟲,圖片爬蟲,文章爬蟲,Python爬蟲爬取新聞網站新聞Python爬蟲網頁網站
- Python爬蟲入門【5】:27270圖片爬取Python爬蟲
- Java爬蟲批量爬取圖片Java爬蟲
- Python爬蟲實戰詳解:爬取圖片之家Python爬蟲
- node:爬蟲爬取網頁圖片爬蟲網頁
- Python爬蟲新手教程: 知乎文章圖片爬取器Python爬蟲
- 爬蟲Selenium+PhantomJS爬取動態網站圖片資訊(Python)爬蟲JS網站Python
- 爬蟲 Scrapy框架 爬取圖蟲圖片並下載爬蟲框架
- python如何爬取動漫截圖網Python
- 新手爬蟲教程:Python爬取知乎文章中的圖片爬蟲Python
- 【python--爬蟲】千圖網高清背景圖片爬蟲Python爬蟲
- 爬蟲豆瓣美女爬蟲
- python爬去百度美女吧圖片Python
- Python爬蟲入門【4】:美空網未登入圖片爬取Python爬蟲
- Python網路爬蟲2 - 爬取新浪微博使用者圖片Python爬蟲
- 用Python網路爬蟲獲取Mikan動漫資源Python爬蟲
- 網路爬蟲---從千圖網爬取圖片到本地爬蟲
- python 爬蟲之requests爬取頁面圖片的url,並將圖片下載到本地Python爬蟲
- Python資料爬蟲學習筆記(11)爬取千圖網圖片資料Python爬蟲筆記
- 使用Python爬蟲實現自動下載圖片Python爬蟲
- Python爬蟲入門【9】:圖蟲網多執行緒爬取Python爬蟲執行緒
- Python爬蟲入門【11】:半次元COS圖爬取Python爬蟲
- Python爬蟲入門教程 4-100 美空網未登入圖片爬取Python爬蟲
- 【Python學習】爬蟲爬蟲爬蟲爬蟲~Python爬蟲
- 教你用Python爬取圖蟲網Python
- 簡單的爬蟲:爬取網站內容正文與圖片爬蟲網站
- Python 爬蟲零基礎教程(1):爬單個圖片Python爬蟲
- python 爬蟲 爬取 learnku 精華文章Python爬蟲
- Python《必應bing桌面圖片爬取》Python
- Python爬蟲入門教程 50-100 Python3爬蟲爬取VIP視訊-Python爬蟲6操作Python爬蟲
- 【python--爬蟲】彼岸圖網高清桌布爬蟲Python爬蟲
- 蘇寧易購網址爬蟲爬取商品資訊及圖片爬蟲
- Python爬蟲入門【7】: 蜂鳥網圖片爬取之二Python爬蟲
- Python爬蟲入門【8】: 蜂鳥網圖片爬取之三Python爬蟲
- Python爬蟲入門【6】:蜂鳥網圖片爬取之一Python爬蟲