【Problem Description】Scraping Douban book review data. Taking books such as 《平凡的世界》 and 《都挺好》 as the subject of analysis, write a program that crawls the short reviews of the chosen book from Douban Reading, with the following requirements:
(1) Crawl the short reviews continuously across the first 3 pages (the page and sort URL parameters are sketched after this problem statement);
(2) The scraped data must include the username, the short-review text, the review time, the rating, and the number of likes ("useful" votes);
(3) Support crawling with a chosen sort order (hot or latest), and for each of the two orders output the first 10 short reviews (username, review text, review time, rating, and vote count);
(4) Output the top 10 short reviews in descending order of vote count;
(5, bonus) Combine Chinese word segmentation with word-cloud generation to analyse the text of the short reviews on the first 3 pages: sort the words by frequency from high to low, output the top 10, and generate your own word-cloud image.
【Exercise Requirements】Provide the source code and the test run results; the source code must include the necessary comments.
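Before the full solution, here is a minimal sketch of how the short-review pages are paged and sorted. It assumes the same subject ID (10517238) and query-string names (start, limit, status, sort) that the solution below uses; sort=new_score corresponds to the "hot" order and sort=time to the "latest" order.

# Minimal sketch: build the first 3 short-review page URLs for one sort order.
# Subject ID and query parameters are taken from the solution below.
BASE = "https://book.douban.com/subject/10517238/comments/"

def page_urls(sort="new_score", pages=3, per_page=20):
    """Yield the URLs of the first `pages` short-review pages."""
    for start in range(0, pages * per_page, per_page):
        yield "{}?start={}&limit={}&status=P&sort={}".format(
            BASE, start, per_page, sort)

for u in page_urls("time"):   # e.g. the "latest" order
    print(u)

The full solution follows.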
import re
from collections import Counter

import jieba
import matplotlib.pyplot as plt
import pandas as pd
import requests
from lxml import etree
from wordcloud import WordCloud

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
                  "(KHTML, like Gecko) Chrome/101.0.4951.54 Safari/537.36 Edg/101.0.1210.39"
}
comments = []  # one [username, content, time, rating, votes] list per short review
words = []     # segmented words collected from all short reviews


def regex_change(line):
    """Strip noise (prefixes, URLs, dates, digits, whitespace, stop characters) from a token."""
    # leading "<digits>::" prefixes
    username_regex = re.compile(r"^\d+::")
    # URLs; [a-zA-Z0-9] is used instead of \w so Chinese characters are not filtered out
    url_regex = re.compile(r"""
        (https?://)?
        ([a-zA-Z0-9]+)
        (\.[a-zA-Z0-9]+)
        (\.[a-zA-Z0-9]+)*
        (/[a-zA-Z0-9]+)*
        """, re.VERBOSE | re.IGNORECASE)
    # date words (year/month/day and weekday names in Chinese)
    date_regex = re.compile(u"""
        年 | 月 | 日 |
        (週一) | (週二) | (週三) | (週四) | (週五) | (週六)
        """, re.VERBOSE)
    # digits (not preceded by a letter)
    decimal_regex = re.compile(r"[^a-zA-Z]\d+")
    # whitespace
    space_regex = re.compile(r"\s+")
    # punctuation and common Chinese stop characters; add any character to remove here
    stop_chars = "[\n”“|,,;;''/?! 。的了是]"
    line = re.sub(stop_chars, "", line)
    line = username_regex.sub("", line)
    line = url_regex.sub("", line)
    line = date_regex.sub("", line)
    line = decimal_regex.sub("", line)
    line = space_regex.sub("", line)
    return line


def getComments(url):
    """Scrape one short-review page and append its reviews to `comments` and `words`."""
    resp = requests.get(url, headers=headers).text
    html = etree.HTML(resp)
    comment_list = html.xpath(".//div[@class='comment']")
    for comment in comment_list:
        name = comment.xpath(".//span[@class='comment-info']/a/text()")[0]    # username
        content = comment.xpath(".//p[@class='comment-content']/span[@class='short']/text()")[0]  # review text
        content = str(content).strip()
        word = jieba.cut(content, cut_all=False, HMM=False)                   # segment the review text
        time = comment.xpath(".//span[@class='comment-info']/a/text()")[1]    # review time
        mark = comment.xpath(".//span[@class='comment-info']/span/@title")    # rating (star title text)
        # map Douban's Chinese star titles to a 1-5 score; 0 when the review has no rating
        star_map = {"力薦": 5, "推薦": 4, "還行": 3, "較差": 2, "很差": 1}
        score = star_map.get(str(mark[-1]), 0) if mark else 0
        good = comment.xpath(".//span[@class='comment-vote']/span[@class='vote-count']/text()")[0]  # "useful" votes
        comments.append([str(name), content, str(time), score, int(good)])
        for i in word:
            cleaned = regex_change(i)
            if len(cleaned) >= 2:
                words.append(cleaned)


def getWordCloud(words):
    """Print the 10 most frequent words and generate a word-cloud image."""
    bow_words = sorted(Counter(words).items(), key=lambda d: d[1], reverse=True)
    print("Top 10 words:")
    for pair in bow_words[:10]:
        print(pair)
    text = ' '.join(words)
    w = WordCloud(background_color='white', width=1000, height=700,
                  font_path='simhei.ttf', margin=10).generate(text)
    plt.imshow(w)    # draw the word cloud first, then display it
    plt.axis('off')
    plt.show()
    w.to_file('wordcloud.png')


print("Please choose an option:")
print(" 1. Hot comments")
print(" 2. Latest comments")
info = int(input())
# sort=new_score is Douban's "hot" order, sort=time the "latest" order
sort_type = "new_score" if info == 1 else "time"
title = ['username', 'comment', 'time', 'rating', 'votes']
comments = []
words = []
# crawl the first 3 pages (20 short reviews per page)
for i in range(0, 60, 20):
    url = ("https://book.douban.com/subject/10517238/comments/"
           "?start={}&limit=20&status=P&sort={}".format(i, sort_type))
    getComments(url)
df = pd.DataFrame(comments, columns=title)
print("First 10 short reviews:")
print(df.head(10))
print("Top 10 short reviews by vote count:")
print(df.sort_values(by='votes', ascending=False).head(10))
getWordCloud(words)
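As a follow-up for requirements (4) and (5), pandas' nlargest and collections.Counter.most_common express the two "top 10" steps more compactly than an explicit sort. This is only a sketch: the sample lists below stand in for the `comments` and `words` lists built by the solution above, and the column names mirror the `title` list used there.

# Alternative sketch for the two "top 10" outputs, using sample data.
from collections import Counter
import pandas as pd

sample_comments = [["userA", "text", "2023-01-01", 5, 120],
                   ["userB", "text", "2023-01-02", 4, 87]]
df = pd.DataFrame(sample_comments,
                  columns=['username', 'comment', 'time', 'rating', 'votes'])
print(df.nlargest(10, 'votes'))               # top 10 rows by vote count

sample_words = ["人生", "命運", "人生", "奮鬥"]
print(Counter(sample_words).most_common(10))  # top 10 words by frequency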