I've been looking into Python crawlers recently, so here is a write-up of my experience scraping movie information from Douban. (This is my first post of this kind!)
1. First, gather a pool of user agents (user_agent) to rotate through:
self.user_agent = [
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
"Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
"Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
]
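To see how this pool gets used (a minimal standalone sketch, separate from the spider class below): each request simply draws a random entry for its User-Agent header.

import random
import requests

# Two entries from the pool above; any subset works for the demo.
user_agent = [
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
]

# Each request gets a randomly chosen User-Agent header.
r = requests.get('https://movie.douban.com/top250',
                 headers={'User-Agent': random.choice(user_agent)},
                 timeout=5)
print(r.status_code)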
2. Next, I found a site that lists free proxy IPs:
www.kuaidaili.com/ops/proxyli…
(Note: the code below actually scrapes its proxy list from www.xicidaili.com.)
3. And a URL for checking whether a proxy IP actually works: http://icanhazip.com (stored as self.testIP in the constructor below).
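The spider only stores this test URL and never actually calls it. Here is a minimal sketch of how such a check could look, assuming icanhazip.com simply echoes the caller's public IP and using the scheme-keyed proxies format from step 7 (the proxy address below is made up):

import requests

def proxy_works(proxy, test_url='http://icanhazip.com', timeout=5):
    # True only if the request succeeds through the proxy
    try:
        r = requests.get(test_url, proxies=proxy, timeout=timeout)
        print('exit IP:', r.text.strip())
        return r.status_code == 200
    except requests.RequestException:
        return False

print(proxy_works({'http': 'http://1.2.3.4:8080'}))  # hypothetical proxy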
4. The decorator below measures how long the whole crawl takes.
# Measure how long the decorated function takes to run
def run_time(func):
    def wrapper(*args, **kw):
        start = time.time()
        func(*args, **kw)  # the wrapped function's return value is discarded
        end = time.time()
        print('running', end - start, 's')
    return wrapper
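A trivial usage example (not part of the spider) to show what the decorator prints:

import time

def run_time(func):  # same decorator as above
    def wrapper(*args, **kw):
        start = time.time()
        func(*args, **kw)
        end = time.time()
        print('running', end - start, 's')
    return wrapper

@run_time
def nap():
    time.sleep(1)

nap()  # prints roughly: running 1.0 s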
5. Define some constants in the class constructor.
def __init__(self):
    # Douban entry link
    self.start_url = 'https://movie.douban.com/top250'
    # URL for fetching proxy IPs; the free proxies are not guaranteed to work
    self.getProxy = 'http://www.xicidaili.com/nn/1/'
    # URL for testing whether a proxy actually works
    self.testIP = 'http://icanhazip.com'
    # queue of Douban detail-page URLs to crawl
    self.qurl = Queue()
    # queue of scraped proxy IPs
    self.IPQurl = Queue()
    self.data = list()
    self.item_num = 5  # items extracted per page (this also caps the number of detail pages), to avoid hammering the site
    self.thread_num = 10  # number of threads crawling detail pages
    self.first_running = True
    # proxy used when crawling Douban
    self.proxy = {}
    # proxy used when fetching more proxy IPs
    self.proxy1 = {}
    # empty dict = no proxy
    self.notproxy = {}
    self.user_agent = [...]  # the user-agent list from step 1 (shown in full in the complete code at the end)
6. Scrape proxy IPs and put them into a queue, so they are ready when we crawl Douban.
def get_proxy(self):
    url = self.getProxy
    try:
        # random.choice(self.user_agent) picks a random user agent.
        # Free proxies are unstable, so self.notproxy (no proxy) is used here;
        # swap in self.proxy1 if your proxies are reliable.
        r = requests.get(url, headers={'User-Agent': random.choice(self.user_agent)}, proxies=self.notproxy, verify=False, timeout=1)
        r.encoding = 'utf-8'
        if r.status_code == 200:
            soup = BeautifulSoup(r.content, 'html.parser')
            ip_list = soup.find_all('table', id='ip_list')
            if len(ip_list):
                tr_list = ip_list[0].find_all('tr')[1:10]
                for i in tr_list:
                    td_list = i.find_all('td')
                    # queued format: PROTOCOL,ip:port
                    temp = td_list[5].text + ',' + td_list[1].text + ':' + td_list[2].text
                    self.IPQurl.put(temp)
                return True
            else:
                print('could not find the table id on the page')
                return False
        else:
            print('failed to fetch proxy IPs')
            return False
    except Exception as e:
        print('error fetching proxy IPs --', str(e))
        return False
7. Set the proxy values.
def set_proxy(self):
    if self.IPQurl.empty():
        if self.get_proxy():
            arr = self.IPQurl.get().split(',')
            arr1 = self.IPQurl.get().split(',')
            if arr[0].find('HTTPS') == -1:
                self.proxy = {arr[0].lower(): 'http://' + arr[1]}
            else:
                self.proxy = {arr[0].lower(): 'https://' + arr[1]}
            if arr1[0].find('HTTPS') == -1:
                self.proxy1 = {arr1[0].lower(): 'http://' + arr1[1]}
            else:
                self.proxy1 = {arr1[0].lower(): 'https://' + arr1[1]}
        else:
            self.proxy = {}
            self.proxy1 = {}
    else:
        arr = self.IPQurl.get().split(',')
        if arr[0].find('HTTPS') == -1:
            self.proxy = {arr[0].lower(): 'http://' + arr[1]}
        else:
            # HTTPS proxies get the https:// scheme
            self.proxy = {arr[0].lower(): 'https://' + arr[1]}
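For reference, set_proxy ends up building the scheme-keyed proxies mapping that requests expects. A hypothetical example of the resulting shape:

# Made-up addresses, illustrating the format requests expects.
http_proxy = {'http': 'http://1.2.3.4:8080'}
https_proxy = {'https': 'https://5.6.7.8:8443'}
# usage: requests.get(url, proxies=http_proxy, timeout=5)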
8. Start crawling the Douban Top 250 list pages to collect detail-page links.
def parse_first(self, url):
    print('crawling,parse_first', url)
    self.set_proxy()
    try:
        # Free proxies are unstable, so self.notproxy is used here;
        # swap in self.proxy if your proxies are reliable.
        r = requests.get(url, headers={'User-Agent': random.choice(self.user_agent)}, proxies=self.notproxy, verify=False, timeout=5)
        r.encoding = 'utf-8'
        if r.status_code == 200:
            soup = BeautifulSoup(r.content, 'html.parser')
            # number of items scraped per page
            movies = soup.find_all('div', class_='info')[:self.item_num]
            for movie in movies:
                url = movie.find('div', class_='hd').a['href']
                self.qurl.put(url)
            nextpage = soup.find('span', class_='next').a
            if nextpage:
                nexturl = self.start_url + nextpage['href']
                self.parse_first(nexturl)
            else:
                self.first_running = False
        else:
            print('IP blocked')
            self.proxy = {}
            self.proxy1 = {}
            self.first_running = False
    except Exception as e:
        self.proxy = {}
        self.proxy1 = {}
        self.first_running = False
        print('proxy request failed --', str(e))
9. Time to scrape the actual data; each record is appended to a list.
def parse_second(self):
    # the loop stops once self.first_running is False and self.qurl is empty
    while self.first_running or not self.qurl.empty():
        if not self.qurl.empty():
            url = self.qurl.get()
            print('crawling,parse_second', url)
            self.set_proxy()
            try:
                r = requests.get(url, headers={'User-Agent': random.choice(self.user_agent)}, proxies=self.notproxy, verify=False, timeout=5)
                r.encoding = 'utf-8'
                if r.status_code == 200:
                    soup = BeautifulSoup(r.content, 'html.parser')
                    mydict = {}
                    mydict['url'] = url
                    title = soup.find('span', property='v:itemreviewed')
                    mydict['title'] = title.text if title else None
                    duration = soup.find('span', property='v:runtime')
                    mydict['duration'] = duration.text if duration else None
                    addtime = soup.find('span', property='v:initialReleaseDate')
                    mydict['addtime'] = addtime.text if addtime else None
                    average = soup.find('strong', property='v:average')
                    mydict['average'] = average.text if average else None
                    imgSrc = soup.find_all('div', id='mainpic')[0].img['src']
                    mydict['imgSrc'] = imgSrc if imgSrc else None
                    # "where to watch" links
                    mydict['play'] = []
                    ul = soup.find_all('ul', class_='bs')
                    if len(ul):
                        li = ul[0].find_all('li')
                        for i in li:
                            obj = {
                                'url': urllib.parse.unquote(i.a['href'].replace('https://www.douban.com/link2/?url=', '')),
                                'text': i.a.text.replace(' ', '').replace('\n', '')
                            }
                            mydict['play'].append(obj)
                    self.data.append(mydict)
                    # sleep a random interval so requests look less bot-like
                    time.sleep(random.random() * 5)
                else:
                    print('IP blocked')
            except Exception as e:
                self.proxy = {}
                self.proxy1 = {}
                print('proxy request failed (2) --', str(e))
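One detail worth a closer look: the play links come wrapped in a Douban redirect (https://www.douban.com/link2/?url=...), so the percent-encoded target has to be stripped out and unquoted. A quick demo with a made-up link:

import urllib.parse

# Hypothetical redirect href, for illustration only.
href = 'https://www.douban.com/link2/?url=https%3A%2F%2Fexample.com%2Fmovie%3Fid%3D42'
target = urllib.parse.unquote(href.replace('https://www.douban.com/link2/?url=', ''))
print(target)  # https://example.com/movie?id=42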
10. The function that actually drives everything is here.
# @run_time prints how long the whole run takes
@run_time
def run(self):
    ths = []
    th1 = Thread(target=self.parse_first, args=(self.start_url,))
    th1.start()
    ths.append(th1)
    for _ in range(self.thread_num):
        th = Thread(target=self.parse_second)
        th.daemon = True
        th.start()
        ths.append(th)
    for th in ths:
        # wait for every thread to terminate
        th.join()
    s = json.dumps(self.data, ensure_ascii=False, indent=4)
    with open('top250.json', 'w', encoding='utf-8') as f:
        f.write(s)
    print('Data crawling is finished.')
11. Time to run it.
if __name__ == '__main__':
    Spider().run()
12. All done. This run scraped only 50 records (5 per page across the 10 list pages) and took about 20 seconds.
The data is written to top250.json; each record has the shape shown below.
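Each record carries the fields collected in parse_second; the values below are placeholders, not real scraped data:

[
    {
        "url": "https://movie.douban.com/subject/...",
        "title": "...",
        "duration": "...",
        "addtime": "...",
        "average": "...",
        "imgSrc": "...",
        "play": [
            {"url": "...", "text": "..."}
        ]
    }
]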
13. Finally, here is the complete code. I hope it helps.
import time
import json
import random
import logging
import requests
import urllib.parse
from queue import Queue
from threading import Thread
from bs4 import BeautifulSoup
logging.captureWarnings(True)
# Scrape Douban Movie Top 250
# Measure how long the decorated function takes to run
def run_time(func):
    def wrapper(*args, **kw):
        start = time.time()
        func(*args, **kw)  # the wrapped function's return value is discarded
        end = time.time()
        print('running', end - start, 's')
    return wrapper
class Spider():
    def __init__(self):
        self.start_url = 'https://movie.douban.com/top250'
        # URL for fetching proxy IPs; the free proxies are not guaranteed to work
        self.getProxy = 'http://www.xicidaili.com/nn/1/'
        # URL for testing whether a proxy actually works
        self.testIP = 'http://icanhazip.com'
        self.qurl = Queue()
        self.IPQurl = Queue()
        self.data = list()
        self.item_num = 5  # items extracted per page (this also caps the number of detail pages), to avoid hammering the site
        self.thread_num = 10  # number of threads crawling detail pages
        self.first_running = True
        # proxy used when crawling Douban
        self.proxy = {}
        # proxy used when fetching more proxy IPs
        self.proxy1 = {}
        # empty dict = no proxy
        self.notproxy = {}
        self.user_agent = [
            "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
            "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
            "Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
            "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
            "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
            "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
            "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
            "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
            "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
            "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
            "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
            "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
            "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
            "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
        ]
    def get_proxy(self):
        url = self.getProxy
        try:
            # Free proxies are unstable, so self.notproxy (no proxy) is used here;
            # swap in self.proxy1 if your proxies are reliable.
            r = requests.get(url, headers={'User-Agent': random.choice(self.user_agent)}, proxies=self.notproxy, verify=False, timeout=1)
            r.encoding = 'utf-8'
            if r.status_code == 200:
                soup = BeautifulSoup(r.content, 'html.parser')
                ip_list = soup.find_all('table', id='ip_list')
                if len(ip_list):
                    tr_list = ip_list[0].find_all('tr')[1:10]
                    for i in tr_list:
                        td_list = i.find_all('td')
                        # queued format: PROTOCOL,ip:port
                        temp = td_list[5].text + ',' + td_list[1].text + ':' + td_list[2].text
                        self.IPQurl.put(temp)
                    return True
                else:
                    print('could not find the table id on the page')
                    return False
            else:
                print('failed to fetch proxy IPs')
                return False
        except Exception as e:
            print('error fetching proxy IPs --', str(e))
            return False
    def set_proxy(self):
        if self.IPQurl.empty():
            if self.get_proxy():
                arr = self.IPQurl.get().split(',')
                arr1 = self.IPQurl.get().split(',')
                if arr[0].find('HTTPS') == -1:
                    self.proxy = {arr[0].lower(): 'http://' + arr[1]}
                else:
                    self.proxy = {arr[0].lower(): 'https://' + arr[1]}
                if arr1[0].find('HTTPS') == -1:
                    self.proxy1 = {arr1[0].lower(): 'http://' + arr1[1]}
                else:
                    self.proxy1 = {arr1[0].lower(): 'https://' + arr1[1]}
            else:
                self.proxy = {}
                self.proxy1 = {}
        else:
            arr = self.IPQurl.get().split(',')
            if arr[0].find('HTTPS') == -1:
                self.proxy = {arr[0].lower(): 'http://' + arr[1]}
            else:
                # HTTPS proxies get the https:// scheme
                self.proxy = {arr[0].lower(): 'https://' + arr[1]}
    def parse_first(self, url):
        print('crawling,parse_first', url)
        self.set_proxy()
        try:
            r = requests.get(url, headers={'User-Agent': random.choice(self.user_agent)}, proxies=self.notproxy, verify=False, timeout=5)
            r.encoding = 'utf-8'
            if r.status_code == 200:
                soup = BeautifulSoup(r.content, 'html.parser')
                # number of items scraped per page
                movies = soup.find_all('div', class_='info')[:self.item_num]
                for movie in movies:
                    url = movie.find('div', class_='hd').a['href']
                    self.qurl.put(url)
                nextpage = soup.find('span', class_='next').a
                if nextpage:
                    nexturl = self.start_url + nextpage['href']
                    self.parse_first(nexturl)
                else:
                    self.first_running = False
            else:
                print('IP blocked')
                self.proxy = {}
                self.proxy1 = {}
                self.first_running = False
        except Exception as e:
            self.proxy = {}
            self.proxy1 = {}
            self.first_running = False
            print('proxy request failed --', str(e))
    def parse_second(self):
        while self.first_running or not self.qurl.empty():
            if not self.qurl.empty():
                url = self.qurl.get()
                print('crawling,parse_second', url)
                self.set_proxy()
                try:
                    r = requests.get(url, headers={'User-Agent': random.choice(self.user_agent)}, proxies=self.notproxy, verify=False, timeout=5)
                    r.encoding = 'utf-8'
                    if r.status_code == 200:
                        soup = BeautifulSoup(r.content, 'html.parser')
                        mydict = {}
                        mydict['url'] = url
                        title = soup.find('span', property='v:itemreviewed')
                        mydict['title'] = title.text if title else None
                        duration = soup.find('span', property='v:runtime')
                        mydict['duration'] = duration.text if duration else None
                        addtime = soup.find('span', property='v:initialReleaseDate')
                        mydict['addtime'] = addtime.text if addtime else None
                        average = soup.find('strong', property='v:average')
                        mydict['average'] = average.text if average else None
                        imgSrc = soup.find_all('div', id='mainpic')[0].img['src']
                        mydict['imgSrc'] = imgSrc if imgSrc else None
                        # "where to watch" links
                        mydict['play'] = []
                        ul = soup.find_all('ul', class_='bs')
                        if len(ul):
                            li = ul[0].find_all('li')
                            for i in li:
                                obj = {
                                    'url': urllib.parse.unquote(i.a['href'].replace('https://www.douban.com/link2/?url=', '')),
                                    'text': i.a.text.replace(' ', '').replace('\n', '')
                                }
                                mydict['play'].append(obj)
                        self.data.append(mydict)
                        # sleep a random interval so requests look less bot-like
                        time.sleep(random.random() * 5)
                    else:
                        print('IP blocked')
                except Exception as e:
                    self.proxy = {}
                    self.proxy1 = {}
                    print('proxy request failed (2) --', str(e))
    @run_time
    def run(self):
        ths = []
        th1 = Thread(target=self.parse_first, args=(self.start_url,))
        th1.start()
        ths.append(th1)
        for _ in range(self.thread_num):
            th = Thread(target=self.parse_second)
            th.daemon = True
            th.start()
            ths.append(th)
        for th in ths:
            # wait for every thread to terminate
            th.join()
        s = json.dumps(self.data, ensure_ascii=False, indent=4)
        with open('top250.json', 'w', encoding='utf-8') as f:
            f.write(s)
        print('Data crawling is finished.')
if __name__ == '__main__':
    Spider().run()