


How do you scrape Bilibili danmaku (bullet comments)? Let's walk through it step by step.
I. Analyze the page
II. Fetch the danmaku data
III. Draw the word cloud
Video link: https://www.bilibili.com/video/BV1zE411Y7JY
I. Analyze the page
Click the danmaku list, open the historical danmaku, and pick any date. In the browser's network panel you can then find the Ajax packet that returns that day's danmaku; the response puts all of the danmaku inside a single <i> tag.
Looking at the request details, the key parts of the Request URL are the two parameters oid and date. date is clearly the day being requested, so changing it lets us page through the history day by day; oid looks like an identifier for the video, so swapping it in points the same request at another video's danmaku.
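Putting that together, a single day's danmaku can be fetched with a minimal request like the sketch below (the oid is this video's, the endpoint is the history API seen in the network panel, and a logged-in cookie is normally required):
import requests

url = "https://api.bilibili.com/x/v2/dm/history"
params = {"type": 1, "oid": "128777652", "date": "2020-08-06"}   # oid = video id, date = YYYY-MM-DD
headers = {
    "user-agent": "Mozilla/5.0",
    "cookie": "SESSDATA=<your own logged-in cookie>",            # placeholder, not a real cookie
}
response = requests.get(url, params=params, headers=headers)
response.encoding = 'utf-8'
print(response.text[:300])   # XML: an <i> root whose <d> children each hold one danmaku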
II. Fetch the danmaku data
This article scrapes the video's historical danmaku from January 1 to August 6, 2020, so we first need to build the sequence of dates:
import pandas as pd
start = '20200101'
end = '20200806'
# Generate the sequence of dates
date_list = [x for x in pd.date_range(start, end).strftime('%Y-%m-%d')]
print(date_list)
Running it prints:
['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05', '2020-01-06', ... '2020-08-06']
Process finished with exit code 0
The scraper code is as follows:
# -*- coding: UTF-8 -*-
"""
@File :spider.py
@Author :葉庭云
@CSDN :https://yetingyun.blog.csdn.net/
"""
import requests
import pandas as pd
import re
import time
import random
from concurrent.futures import ThreadPoolExecutor
import datetime
user_agent = [
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]
start_time = datetime.datetime.now()
def Grab_barrage(date):
    # Spoof the request headers
    headers = {
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-site",
        "origin": "https://www.bilibili.com",
        "referer": "https://www.bilibili.com/video/BV1Z5411Y7or?from=search&seid=8575656932289970537",
        # a logged-in cookie (SESSDATA) is required by the history API
        "cookie": "_uuid=0EBFC9C8-19C3-66CC-4C2B-6A5D8003261093748infoc; buvid3=4169BA78-DEBD-44E2-9780-B790212CCE76155837infoc; sid=ae7q4ujj; DedeUserID=501048197; DedeUserID__ckMd5=1d04317f8f8f1021; SESSDATA=e05321c1%2C1607514515%2C52633*61; bili_jct=98edef7bf9e5f2af6fb39b7f5140474a; CURRENT_FNVAL=16; rpdid=|(JJmlY|YukR0J'ulmumY~u~m; LIVE_BUVID=AUTO4315952457375679; CURRENT_QUALITY=80; bp_video_offset_501048197=417696779406748720; bp_t_offset_501048197=417696779406748720; PVID=2",
        "user-agent": random.choice(user_agent),
    }
    # Parameters needed to build the request url
    params = {
        'type': 1,
        'oid': '128777652',
        'date': date
    }
    # Send the request and get the response
    response = requests.get(url, params=params, headers=headers)
    # Reset the encoding
    response.encoding = 'utf-8'
    # Extract the danmaku with a regex: each one sits in a <d ...>text</d> element inside the <i> root
    comment = re.findall('<d p=".*?">(.*?)</d>', response.text)
    # Append every danmaku to a txt file
    with open('barrages.txt', 'a+', encoding='utf-8') as f:
        for con in comment:
            f.write(con + '\n')
    time.sleep(random.randint(1, 3))  # pause between requests
def main():
    # Scrape with a thread pool to speed things up
    with ThreadPoolExecutor(max_workers=4) as executor:
        executor.map(Grab_barrage, date_list)
    # Work out the elapsed time
    delta = (datetime.datetime.now() - start_time).total_seconds()
    print(f'Elapsed: {delta}s')
if __name__ == '__main__':
    # Target url
    url = "https://api.bilibili.com/x/v2/dm/history"
    start = '20200101'
    end = '20200806'
    # Generate the sequence of dates
    date_list = [x for x in pd.date_range(start, end).strftime('%Y-%m-%d')]
    # Run the scraper
    main()
The program runs, scrapes the danmaku, and saves them to the txt file.
Elapsed: 32.040222s
Process finished with exit code 0
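The regex works because every danmaku is wrapped in a <d> element, but since the response is plain XML it can also be parsed with the standard library instead of a regex. A minimal alternative sketch (not the original author's code):
import xml.etree.ElementTree as ET

def extract_danmaku(xml_bytes):
    """Parse the history-danmaku XML: the <i> root holds one <d> element per danmaku."""
    root = ET.fromstring(xml_bytes)   # pass raw bytes so the XML encoding declaration is honoured
    return [d.text for d in root.iter('d') if d.text]

# Usage inside Grab_barrage: comment = extract_danmaku(response.content)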
III. Draw the word cloud
1. Read the danmaku data from the txt file
with open('barrages.txt', encoding='utf-8') as f:
    data = f.readlines()
    print(f'Danmaku count: {len(data)}')
The output:
Danmaku count: 52708
Process finished with exit code 0
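Before segmenting, a quick sanity check on the raw lines can be useful, for example looking at which danmaku are repeated verbatim most often (an optional extra, not part of the original workflow):
import collections

with open('barrages.txt', encoding='utf-8') as f:
    lines = [line.strip() for line in f if line.strip()]

# The ten most repeated danmaku, verbatim
for text, count in collections.Counter(lines).most_common(10):
    print(count, text)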
2. Draw the word cloud with Pyecharts
import jieba
import collections
import re
from pyecharts.charts import WordCloud
from pyecharts.globals import SymbolType
from pyecharts import options as opts
from pyecharts.globals import ThemeType, CurrentConfig
# Point pyecharts at a local copy of its assets (optional; defaults to the online CDN)
CurrentConfig.ONLINE_HOST = 'D:/python/pyecharts-assets-master/assets/'
with open('barrages.txt', encoding='utf-8') as f:
    data = f.read()
# Text preprocessing: strip useless characters and keep only the Chinese text
new_data = re.findall('[\u4e00-\u9fa5]+', data, re.S)
new_data = " ".join(new_data)
# Word segmentation -- precise mode
seg_list_exact = jieba.cut(new_data, cut_all=False)
result_list = []
with open('stop_words.txt', encoding='utf-8') as f:
    con = f.readlines()
    stop_words = set()
    for i in con:
        i = i.replace("\n", "")  # strip the trailing \n from each line
        stop_words.add(i)
for word in seg_list_exact:
    # Drop stop words and single-character tokens
    if word not in stop_words and len(word) > 1:
        result_list.append(word)
print(result_list)
# Count word frequencies after filtering
word_counts = collections.Counter(result_list)
# Take the 100 most frequent words
word_counts_top100 = word_counts.most_common(100)
# Print the frequencies to inspect them
print(word_counts_top100)
word1 = WordCloud(init_opts=opts.InitOpts(width='1350px', height='750px', theme=ThemeType.MACARONS))
word1.add('Word frequency', data_pair=word_counts_top100,
          word_size_range=[15, 108], textstyle_opts=opts.TextStyleOpts(font_family='cursive'),
          shape=SymbolType.DIAMOND)
word1.set_global_opts(title_opts=opts.TitleOpts('Danmaku word cloud'),
                      toolbox_opts=opts.ToolboxOpts(is_show=True, orient='vertical'),
                      tooltip_opts=opts.TooltipOpts(is_show=True, background_color='red', border_color='yellow'))
# Render to an html page
word1.render("danmaku_wordcloud.html")
The rendered word cloud looks like this:
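If a static image is preferred over the HTML page, pyecharts can also export a PNG via the optional snapshot-selenium extra (a sketch; it assumes the snapshot-selenium package and a local browser driver are installed):
from pyecharts.render import make_snapshot
from snapshot_selenium import snapshot

# Render the chart to HTML first, then screenshot it into a PNG
make_snapshot(snapshot, word1.render("danmaku_wordcloud.html"), "danmaku_wordcloud.png")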
3. Draw the word cloud with stylecloud
# -*- coding: UTF-8 -*-
"""
@File :stylecloud_詞云圖.py
@Author :葉庭云
@CSDN :https://yetingyun.blog.csdn.net/
"""
from stylecloud import gen_stylecloud
import jieba
import re
# Read the data
with open('barrages.txt', encoding='utf-8') as f:
    data = f.read()
# Text preprocessing: strip useless characters and keep only the Chinese text
new_data = re.findall('[\u4e00-\u9fa5]+', data, re.S)
new_data = " ".join(new_data)
# Word segmentation -- precise mode
seg_list_exact = jieba.cut(new_data, cut_all=False)
result_list = []
with open('stop_words.txt', encoding='utf-8') as f:
    con = f.readlines()
    stop_words = set()
    for i in con:
        i = i.replace("\n", "")  # strip the trailing \n from each line
        stop_words.add(i)
for word in seg_list_exact:
    # Drop stop words and single-character tokens
    if word not in stop_words and len(word) > 1:
        result_list.append(word)
print(result_list)
# Draw the word cloud with stylecloud
gen_stylecloud(
    text=' '.join(result_list),               # input text
    size=600,                                 # size of the word cloud image
    collocations=False,                       # disable word collocations
    font_path=r'C:\Windows\Fonts\msyh.ttc',   # font
    output_name='wordcloud.png',              # output file name
    icon_name='fas fa-apple-alt',             # mask icon
    palette='cartocolors.qualitative.Bold_5'  # palettable colour scheme
)
The rendered result:
How about that? Pretty simple, right? That's what we programmers get to do. Haha!