[Python] How to Fetch the Most Valuable Content on Zhihu

By 楊奇龍, published 2017-05-06
1. Preface
   I assume most people who can see this blog have heard of Zhihu; if you haven't, here is the link: Zhihu. As a Zhihu user, in order to gain a deeper understanding of (the pictures attached to) questions like "What is it like to xxx", and to put "technology changes life" into practice (which is, admittedly, a bit silly), this post uses requests, in the spirit of exploration, to crawl some of the most valuable content on Zhihu with a short Python program.

2. Practice
#!/usr/bin/env python
#-*- coding:utf-8 -*-
import re
import requests
import os
from urlparse import urlsplit   # Python 2; in Python 3 this lives in urllib.parse
from os.path import basename

def getHtml(url):
    session = requests.Session()
    # pretend to be a regular browser
    header = {
        'User-Agent': "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36",
        'Accept-Encoding': 'gzip, deflate'}
    res = session.get(url, headers=header)
    if res.status_code == 200:
        content = res.content
    else:
        content = ''
    return content

def mkdir(path):
    if not os.path.exists(path):
        print 'Creating directory:', path
        os.makedirs(path)
        return True
    else:
        print "Images will be stored in:", os.getcwd() + os.sep + path
        return False

def download_pic(img_lists, dir_name):
    print "There are {num} pictures in total".format(num=len(img_lists))
    for image_url in img_lists:
        response = requests.get(image_url, stream=True)
        if response.status_code == 200:
            image = response.content
        else:
            continue
        file_name = dir_name + os.sep + basename(urlsplit(image_url)[2])
        try:
            # the with statement closes the file for us; no explicit close() is needed
            with open(file_name, "wb") as picture:
                picture.write(image)
        except IOError:
            print "IO Error writing {pic_name}".format(pic_name=file_name)
            continue
        print "Downloaded {pic_name}!".format(pic_name=file_name)

def getAllImg(html):
    # use a regular expression to filter the image URLs out of the page source
    #reg = r'data-actualsrc="(.*?)">'
    reg = r'https://pic\d\.zhimg\.com/[a-fA-F0-9]{5,32}_\w+\.jpg'
    imgre = re.compile(reg, re.S)
    tmp_list = imgre.findall(html)  # every image URL found anywhere in the page
    # deduplicate and drop avatars, keeping only the full-size images
    tmp_list = list(set(tmp_list))  # deduplicate
    imglist = []
    for item in tmp_list:
        # full-size answer images end with "r.jpg"; avatars and thumbnails do not
        if item.endswith('r.jpg'):
            imglist.append(item)
    print 'num : %d' % (len(imglist))
    return imglist


if __name__ == '__main__':
    question_id = 35990613
    zhihu_url = "https://www.zhihu.com/question/{qid}".format(qid=question_id)
    html_content = getHtml(zhihu_url)
    path = 'zhihu_pic'
    mkdir(path)                         # create the local directory
    img_list = getAllImg(html_content)  # collect the list of image URLs
    download_pic(img_list, path)        # save the images
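As an aside, the expression basename(urlsplit(image_url)[2]) in download_pic is what turns an image URL into a local file name: index 2 of the split result is the URL's path component, and basename keeps only its last segment. A quick illustration with a made-up URL:

from urlparse import urlsplit
from os.path import basename

# hypothetical image URL, for illustration only
url = "https://pic1.zhimg.com/0123456789abcdef0123456789abcdef_r.jpg"
print urlsplit(url)[2]            # "/0123456789abcdef0123456789abcdef_r.jpg"
print basename(urlsplit(url)[2])  # "0123456789abcdef0123456789abcdef_r.jpg"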

This code still has a shortcoming: it cannot fetch all of the images, because the question page only renders the first batch of answers, and loading the rest requires automatically clicking "more".
The second version of the code solves this auto-loading problem from the first version: instead of scraping the rendered page, it posts to the answer-list endpoint page by page until no more answers come back.
#!/usr/bin/env python
#-*- coding:utf-8 -*-
import re
import json
import requests
import os
from urlparse import urlsplit
from os.path import basename

headers = {
    'User-Agent': "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36",
    'Accept-Encoding': 'gzip, deflate'}


def mkdir(path):
    if not os.path.exists(path):
        print 'Creating directory:', path
        os.makedirs(path)
        return True
    else:
        print "Images will be stored in:", os.getcwd() + os.sep + path
        return False


def download_pic(img_lists, dir_name):
    print "There are {num} pictures in total".format(num=len(img_lists))
    for image_url in img_lists:
        response = requests.get(image_url, stream=True)
        if response.status_code == 200:
            image = response.content
        else:
            continue
        file_name = dir_name + os.sep + basename(urlsplit(image_url)[2])
        try:
            # the with statement closes the file for us; no explicit close() is needed
            with open(file_name, "wb") as picture:
                picture.write(image)
        except IOError:
            print "IO Error writing {pic_name}".format(pic_name=file_name)
            continue
        print "Downloaded {pic_name}!".format(pic_name=file_name)


def get_image_url(qid, headers):
    # pull the image URLs out of each answer with a regular expression
    #reg = r'data-actualsrc="(.*?)">'
    tmp_url = "https://www.zhihu.com/node/QuestionAnswerListV2"
    size = 10
    image_urls = []
    session = requests.Session()
    # loop over the paginated answer list; this replaces clicking "more" by hand,
    # with each page returning one batch of answers
    while True:
        postdata = {'method': 'next',
                    'params': json.dumps({"url_token": qid, "pagesize": "10", "offset": size})}
        page = session.post(tmp_url, headers=headers, data=postdata)
        ret = page.json()
        answers = ret['msg']
        size += 10
        if not answers:
            print "Done collecting image URLs, pages: ", (size - 10) / 10
            return image_urls
        #reg = r'https://pic\d.zhimg.com/[a-fA-F0-9]{5,32}_\w+.jpg'
        imgreg = re.compile('data-original="(.*?)"', re.S)
        for answer in answers:
            tmp_list = []
            url_items = re.findall(imgreg, answer)
            for item in url_items:
                # strip the escaping backslashes ('\\') from the URLs
                image_url = item.replace("\\", "")
                tmp_list.append(image_url)
            # deduplicate and drop avatars, keeping the data-original URLs
            tmp_list = list(set(tmp_list))  # deduplicate
            for item in tmp_list:
                # full-size answer images end with "r.jpg"
                if item.endswith('r.jpg'):
                    print item
                    image_urls.append(item)
        print 'size: %d, num : %d' % (size, len(image_urls))


if __name__ == '__main__':
    question_id = 26037846
    zhihu_url = "https://www.zhihu.com/question/{qid}".format(qid=question_id)
    path = 'zhihu_pic'
    mkdir(path)                                     # create the local directory
    img_list = get_image_url(question_id, headers)  # collect the list of image URLs
    download_pic(img_list, path)                    # save the images
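A design note on download_pic: it requests with stream=True but then reads response.content, which still buffers the entire image in memory before writing it out. For large images a chunked write is gentler. A minimal sketch of that variant (the function name and the 1 KB chunk size are my own choices, not part of the original script):

import os
import requests
from urlparse import urlsplit
from os.path import basename

def download_pic_streamed(image_url, dir_name):
    response = requests.get(image_url, stream=True)
    if response.status_code != 200:
        return False
    file_name = dir_name + os.sep + basename(urlsplit(image_url)[2])
    with open(file_name, "wb") as picture:
        # write the body in 1 KB chunks instead of loading it all at once
        for chunk in response.iter_content(chunk_size=1024):
            if chunk:  # skip keep-alive chunks
                picture.write(chunk)
    return True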

