Arknights Wiki Data Scraping


Preface

The PRTS wiki is a dynamic site, so our approach is to take the URLs the crawler schedules and re-open them in Chrome via Selenium, letting the page render before parsing. For details, see python爬虫实战 scrapy+selenium爬取动态网页 (a tutorial on scraping dynamic pages with Scrapy + Selenium).
Source code and sample results: arknights_wiki
Or clone the repository:

gh repo clone muqiuwu/arknights_wiki

To run a spider, execute the following command from the project root (the -O flag overwrites quotes.json on each run; use a lowercase -o to append instead):

scrapy crawl quotes -O quotes.json   # run the spider

The snippets below are excerpts rather than a full reproduction; they are meant to illustrate the approach.
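
Because the pages are rendered by JavaScript, the crawl relies on a Selenium downloader middleware that loads each request in Chrome and hands the rendered HTML back to Scrapy. The following is a minimal sketch of such a middleware, not the project's actual file; the class name, wait time, and settings key are illustrative.

from scrapy.http import HtmlResponse
from selenium import webdriver

class SeleniumMiddleware:
    """Render each request in headless Chrome and return the resulting HTML to Scrapy."""

    def __init__(self):
        options = webdriver.ChromeOptions()
        options.add_argument('--headless')      # no visible browser window
        self.driver = webdriver.Chrome(options=options)

    def process_request(self, request, spider):
        self.driver.get(request.url)
        self.driver.implicitly_wait(5)          # give the page time to render
        body = self.driver.page_source
        # Returning a Response here short-circuits Scrapy's default downloader
        return HtmlResponse(url=request.url, body=body,
                            encoding='utf-8', request=request)

Enable it in settings.py with something like DOWNLOADER_MIDDLEWARES = {'douban.middlewares.SeleniumMiddleware': 543}.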

Data scraping

Spider

Imports

import scrapy
from douban.items import DoubanItem   # item definitions (the project module is named douban)
from bs4 import BeautifulSoup
import pandas as pd
import matplotlib.pyplot as plt

# Matplotlib setup so Chinese labels render correctly (used by the analysis scripts below)
plt.rcParams['font.sans-serif'] = 'SimSun'
plt.rcParams['axes.unicode_minus'] = False
ganyuan = pd.read_excel('干员总览.xlsx', sheet_name='Sheet1')

Scraping operators

There are two ways to scrape operators. The first is to scrape the operator overview page directly, but that page only exposes the first fifty entries. My workaround is to scrape one class at a time, starting with the Vanguards and repeating for the other classes.

allowed_domains = ['prts.wiki']   # bare domain, no trailing slash
start_urls = ['https://prts.wiki/w/%E5%B9%B2%E5%91%98%E4%B8%80%E8%A7%88#']
# Leftover example from the tutorial this spider was adapted from (multi-page crawling):
# allowed_domains = ['space.bilibili.com']
# start_urls = ['https://space.bilibili.com/1629347259/video']
# page = 10  # starting page number when crawling several pages
# Work out the URL pattern of later pages by watching how the address changes after paging
# base_url = 'https://space.bilibili.com/1629347259/video?tid=0&page={}'

On this page every grid cell is one operator, so we can loop over the cells while parsing.

# XPath paths found by inspecting the page:
# index     //*[@id="filter-result"]/div[1]
# name      //*[@id="filter-result"]/div[1]/div[2]/div/div[1]
# cname     //*[@id="filter-result"]/div[1]/div[2]/div/a/div
# hp        //*[@id="filter-result"]/div[1]/div[4]/div[1]
# atk       //*[@id="filter-result"]/div[1]/div[4]/div[2]
# re_deploy //*[@id="filter-result"]/div[1]/div[5]/div[1]
# code      //*[@id="filter-result"]/div[1]/div[2]/div/div[3]
def parse(self, response):
    # Processing of the rendered HTML: one <div> per operator under #filter-result
    movie_list = response.xpath('//*[@id="filter-result"]/div')
    if movie_list and len(movie_list) > 0:
        for movie in movie_list:
            item = DoubanItem()
            # .get() and .extract()[0] are equivalent here: normalize-space() always yields exactly one string
            item['cname'] = movie.xpath("normalize-space(./div[2]/div/a/div/text())").get()
            item['ename'] = movie.xpath("normalize-space(./div[2]/div/div[1]/text())").extract()[0]
            item['jname'] = movie.xpath("normalize-space(./div[2]/div/div[2]/text())").extract()[0]
            item['code'] = movie.xpath("normalize-space(./div[2]/div/div[3]/text())").extract()[0]
            item['sub_occupation'] = movie.xpath("normalize-space(./div[3]/div/div[1]/text())").extract()[0]
            item['influnce'] = movie.xpath("normalize-space(./div[3]/div/div[2]/text())").extract()[0]
            item['place_of_birth'] = movie.xpath("normalize-space(./div[3]/div/div[3]/text())").extract()[0]
            item['race'] = movie.xpath("normalize-space(./div[3]/div/div[4]/text())").extract()[0]
            item['hp'] = movie.xpath("normalize-space(./div[4]/div[1]/text())").get()
            item['atk'] = movie.xpath("normalize-space(./div[4]/div[2]/text())").extract()[0]
            item['defe'] = movie.xpath("normalize-space(./div[4]/div[3]/text())").extract()[0]
            item['res'] = movie.xpath("normalize-space(./div[4]/div[4]/text())").extract()[0]
            item['re_deploy'] = movie.xpath("normalize-space(./div[5]/div[1]/text())").extract()[0]
            item['cost'] = movie.xpath("normalize-space(./div[5]/div[2]/text())").extract()[0]
            item['block'] = movie.xpath("normalize-space(./div[5]/div[3]/text())").extract()[0]
            item['interval'] = movie.xpath("normalize-space(./div[5]/div[4]/text())").extract()[0]
            item['sex'] = movie.xpath("normalize-space(./div[6]/div[1]/text())").extract()[0]
            item['position'] = movie.xpath("normalize-space(./div[6]/div[2]/text())").extract()[0]
            item['obtain'] = [movie.xpath(f"normalize-space(./div[7]/div[{i}]/text())").get()
                              for i in range(1, 5)]
            item['tag'] = (movie.xpath("normalize-space(./div[8]/div[1]/text())").get(),
                           movie.xpath("normalize-space(./div[8]/div[2]/text())").extract()[0],
                           movie.xpath("normalize-space(./div[8]/div[3]/text())").extract()[0],
                           movie.xpath("normalize-space(./div[8]/div[4]/text())").extract()[0])
            item['feature'] = movie.xpath("normalize-space(./div[9]/div/div)").get()
            yield item

Scraping collectibles

This works much like the operator scrape.

allowed_domains = ['prts.wiki']
start_urls = ['https://prts.wiki/w/%E8%90%A8%E5%8D%A1%E5%85%B9%E7%9A%84%E6%97%A0%E7%BB%88%E5%A5%87%E8%AF%AD/%E6%83%B3%E8%B1%A1%E5%AE%9E%E4%BD%93%E5%9B%BE%E9%89%B4']


def parse(self, response):
    # Processing of the rendered HTML.
    # XPath paths found by inspecting the page:
    # index   //*[@id="mw-content-text"]/div[1]
    # number  //*[@id="mw-content-text"]/div[1]/table[2]/tbody/tr[1]/th[1]
    # name    //*[@id="mw-content-text"]/div[1]/table[2]/tbody/tr[1]/th[2]
    # cost    //*[@id="mw-content-text"]/div[1]/table[2]/tbody/tr[3]/td[1]/div/span
    # feature //*[@id="mw-content-text"]/div[1]/table[2]/tbody/tr[3]/td[2]/b
    # story   //*[@id="mw-content-text"]/div[1]/table[2]/tbody/tr[3]/td[2]/i
    movie_list = response.xpath('//*[@id="mw-content-text"]/div[1]/table')
    if movie_list and len(movie_list) > 0:
        for movie in movie_list:
            item = DoubanItem()
            item['numb'] = movie.xpath("normalize-space(./tbody/tr[1]/th[1]/text())").get()
            item['name'] = movie.xpath("normalize-space(./tbody/tr[1]/th[2]/text())").extract()[0]
            item['cost'] = movie.xpath("normalize-space(./tbody/tr[3]/td[1]/div/span/text())").extract()[0]
            # normalize-space() returns '' (not nothing) when the cell is missing,
            # so the check below falls back to the alternative table layout
            if item['cost']:
                item['feature'] = movie.xpath("normalize-space(./tbody/tr[3]/td[2]/b)").get()
                item['story'] = movie.xpath("normalize-space(./tbody/tr[3]/td[2]/i/text())").extract()[0]
            else:
                item['feature'] = movie.xpath("normalize-space(./tbody/tr[3]/td/b)").get()
                item['story'] = movie.xpath("normalize-space(./tbody/tr[3]/td/i/text())").extract()[0]
                item['cost'] = movie.xpath("normalize-space(./tbody/tr[4]/td/span)").get()
            yield item

The items file

This file defines the fields that receive the scraped data. Keep only the fields you need; I usually comment out the rest. In the version below both field sets are commented out, so uncomment the set that matches the spider you are about to run.

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class DoubanItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()

    # --- Operator spider fields ---
    # cname = scrapy.Field()
    # ename = scrapy.Field()
    # jname = scrapy.Field()
    # code = scrapy.Field()
    # sub_occupation = scrapy.Field()
    # influnce = scrapy.Field()
    # place_of_birth = scrapy.Field()
    # race = scrapy.Field()
    # hp = scrapy.Field()
    # atk = scrapy.Field()
    # defe = scrapy.Field()
    # res = scrapy.Field()
    # re_deploy = scrapy.Field()
    # cost = scrapy.Field()
    # block = scrapy.Field()
    # interval = scrapy.Field()
    # sex = scrapy.Field()
    # position = scrapy.Field()
    # obtain = scrapy.Field()
    # tag = scrapy.Field()
    # feature = scrapy.Field()

    # --- Collectible spider fields ---
    # numb = scrapy.Field()
    # name = scrapy.Field()
    # cost = scrapy.Field()
    # feature = scrapy.Field()
    # story = scrapy.Field()

    pass
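
For example, when running the collectible spider the active definition would look like the sketch below; a scrapy.Item raises KeyError for any field that is not declared, so at least the fields used by the spider you run must be uncommented.

class DoubanItem(scrapy.Item):
    # fields used by the collectible spider
    numb = scrapy.Field()
    name = scrapy.Field()
    cost = scrapy.Field()
    feature = scrapy.Field()
    story = scrapy.Field()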

Data cleaning

Convert the JSON output into Excel.

Cleaning the operator data

import pandas as pd
import json
from pathlib import Path

# One JSON file per operator class, produced by separate spider runs
json_files = ["C:/Users/86183/Documents/WeChat Files/wxid_ot8jppgzo67v22/FileStorage/File/2024-07/先锋.json",
              "C:/Users/86183/Documents/WeChat Files/wxid_ot8jppgzo67v22/FileStorage/File/2024-07/近卫男.json",
              "C:/Users/86183/Documents/WeChat Files/wxid_ot8jppgzo67v22/FileStorage/File/2024-07/近卫女.json",
              "C:/Users/86183/Documents/WeChat Files/wxid_ot8jppgzo67v22/FileStorage/File/2024-07/狙击.json",
              "C:/Users/86183/Documents/WeChat Files/wxid_ot8jppgzo67v22/FileStorage/File/2024-07/术师.json",
              "C:/Users/86183/Documents/WeChat Files/wxid_ot8jppgzo67v22/FileStorage/File/2024-07/医疗.json",
              "C:/Users/86183/Documents/WeChat Files/wxid_ot8jppgzo67v22/FileStorage/File/2024-07/重装.json",
              "C:/Users/86183/Documents/WeChat Files/wxid_ot8jppgzo67v22/FileStorage/File/2024-07/辅助.json",
              "C:/Users/86183/Documents/WeChat Files/wxid_ot8jppgzo67v22/FileStorage/File/2024-07/特种.json"]
all_data = []
for file in json_files:
    with open(file, 'r', encoding='utf-8') as f:
        data = json.load(f)
        all_data.extend(data)  # append this file's records to all_data
df = pd.DataFrame(all_data)
# Rename the item fields to Chinese column headers
data = df.rename(columns={
    'cname': '中文名',
    'ename': '英文名',
    'jname': '日文名',
    'code': '编号',
    'sub_occupation': '子职业',
    'influnce': '归属势力',
    'place_of_birth': '出生地',
    'race': '种族',
    'hp': '血量',
    'atk': '攻击',
    'defe': '物防',
    'res': '法防',
    're_deploy': '再部署时间',
    'cost': '费用',
    'block': '阻挡数',
    'interval': '攻速',
    'sex': '性别',
    'position': '定位',
    'obtain': '获取方式',
    'tag': '标签',
    'feature': '特性'
})
data.to_excel('干员总览.xlsx', index=False)  # index=False omits the index column

Cleaning the collectible data

import pandas as pd
import json
from pathlib import Path

json_files = ["C:/Users/86180/OneDrive/桌面/python/douban/quotes.json"]
all_data = []
for file in json_files:
    with open(file, 'r', encoding='utf-8') as f:
        data = json.load(f)
        all_data.extend(data)  # append this file's records to all_data
df = pd.DataFrame(all_data)
# Rename the item fields to Chinese column headers
data = df.rename(columns={
    'numb': '编号',
    'name': '名字',
    'cost': '获取',
    'feature': '效果',
    'story': '故事'
})
data.to_excel('萨卡兹的无终奇语想象实体图鉴.xlsx', index=False)  # index=False omits the index column

Data analysis

Pie chart

df = pd.read_excel("C:/Users/86183/Desktop/总览/json to csv or excel/干员总览.xlsx", sheet_name='Sheet1')

# Inspect the loaded data
df.head()  # show the first few rows
# Count the occurrences of each value in the faction column and reset the index into a DataFrame
# NOTE: the cleaning script above names this column '归属势力'; adjust the name to match your file
power_count = df['所属势力'].value_counts().reset_index()
# print(power_count)
temp = 10  # frequency threshold
than_ten = power_count[power_count['count'] >= temp].copy()
lower_than_ten_sum = power_count[power_count['count'] < temp]['count'].sum()
# Merge every faction below the threshold into a single '其他' (other) category
other_row = pd.DataFrame([{'所属势力': '其他', 'count': lower_than_ten_sum}])
result = pd.concat([than_ten, other_row])
# Optional pie chart settings
explode = [0.02] * len(result)  # gap between wedges
colors = ['lavender', 'g', 'r', 'c', 'm', 'y', 'cyan', 'pink', 'orange', 'grey'][:len(than_ten)]
# Draw the pie chart
plt.figure(figsize=(20, 20))
plt.pie(result['count'], explode=explode, labels=result['所属势力'],
        colors=colors, autopct='%2.1f%%', textprops={'fontsize': 24})
plt.title('干员所属势力分布饼图')
plt.savefig('干员所属势力分布饼图.png')
plt.show()

Bar chart

# Load the data
df = pd.read_excel("C:/Users/86180/OneDrive/桌面/python/干员总览.xlsx", sheet_name='Sheet1')

# Helper that styles a bar chart: title, title size, x/y labels, x-tick rotation, tick font sizes
def plot_bar_set(title, title_fontsize, xlabel, ylabel, x_rotation=0, x_fontsize=14, y_fontsize=14):
    plt.title(title, fontsize=title_fontsize)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.xticks(rotation=x_rotation, fontsize=x_fontsize)
    plt.yticks(fontsize=y_fontsize)

# Inspect the loaded data
df.head()  # show the first few rows
# Count how many operators fall under each sub-class
grouped = df['子职业'].value_counts().reset_index()
plt.figure(figsize=(120, 40))
# Bar chart: x axis is '子职业', y axis is 'count', data source is `grouped`
plt.bar('子职业', 'count', data=grouped, width=0.7)
plot_bar_set('干员子职业分布统计', 150, '子职业', '个数', 45, 64, 64)
# Save the figure
plt.savefig('干员子职业总览.png')

Scatter plot

# Extract the attack and HP columns
attack = df['攻击']
hp = df['血量']


# Draw the scatter plot
plt.figure(figsize=(10, 6))  # figure size

plt.scatter(hp, attack, marker='o', color='blue', alpha=0.5)  # HP on x, attack on y
plt.title('干员攻击与血量散点图')  # chart title
plt.xlabel('血量')  # x-axis label
plt.ylabel('攻击')  # y-axis label

plt.grid(True)  # add grid lines
plt.tight_layout()  # adjust layout so labels are not clipped
plt.savefig('干员攻击与血量散点图.png')
plt.show()  # display the figure

Sub-class statistics

There are quite a few of these, so only one is shown here: basic-attack DPS, computed as attack divided by attack interval.

import pandas as pd
import openpyxl
import matplotlib.pyplot as plt

# Display Chinese characters correctly
plt.rcParams['font.sans-serif'] = 'SimSun'
plt.rcParams['axes.unicode_minus'] = False

df = pd.read_excel('干员总览.xlsx', sheet_name='Sheet1')
df.head()
# The attack-interval column is stored as text such as '1.05s'; strip the unit and convert to float
df['攻速'] = df['攻速'].str.replace('s', '').astype(float)
df['普攻DPS'] = df['攻击'] / df['攻速']
df_sorted = df.sort_values(by='子职业')
average_dps = df_sorted.groupby('子职业')['普攻DPS'].mean().reset_index()
average_dps = average_dps.sort_values(by='普攻DPS')
# Write the per-sub-class averages to a new workbook
output_file = '干员普攻DPS统计.xlsx'
with pd.ExcelWriter(output_file, engine='openpyxl') as writer:
    average_dps.to_excel(writer, sheet_name='普攻DPS统计', index=False)
# Plot the averages as a bar chart
plt.figure(figsize=(80, 40))
plt.bar(average_dps['子职业'], average_dps['普攻DPS'], color='skyblue')
plt.xticks(fontsize=60, rotation=45)
plt.yticks(fontsize=60)
plt.xlabel('子职业', fontsize=60)
plt.ylabel('平均普攻DPS', fontsize=60)
plt.title('不同子职业的平均普攻DPS', fontsize=60)
plt.tight_layout()
plt.savefig('平均普攻DPS柱状图.png')
plt.show()

Selected results

(Result images omitted: scatter plot, bar charts, and other figures such as the race distribution — see the repository linked above.)

Improvements

We noticed that every character's detail page follows the pattern https://prts.wiki/w/nymph, i.e. https://prts.wiki/w/ followed by the character's name, so we can reuse the names we already scraped to write another spider that loops over all of the detail pages.
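
As a rough sketch of that idea (the spider name, the assumption that the scraped Chinese name forms the page title, and the detail-page XPath are all illustrative, not code from the repository):

import pandas as pd
import scrapy
from urllib.parse import quote


class OperatorDetailSpider(scrapy.Spider):
    name = 'operator_detail'
    allowed_domains = ['prts.wiki']

    def start_requests(self):
        # Reuse the names collected earlier to build one detail-page URL per operator
        names = pd.read_excel('干员总览.xlsx', sheet_name='Sheet1')['中文名']
        for cname in names:
            url = 'https://prts.wiki/w/' + quote(str(cname))
            yield scrapy.Request(url, callback=self.parse_detail, cb_kwargs={'cname': cname})

    def parse_detail(self, response, cname):
        # Pull whatever extra fields are needed from the detail page; this XPath is a placeholder
        yield {
            'cname': cname,
            'page_title': response.xpath('normalize-space(//h1)').get(),
        }

Whether the wiki expects the Chinese or the English name in the URL is worth verifying against a few pages before running the full crawl; the same Selenium middleware from the preface would also be needed here.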