Articles in this series
Python crawler + pygal interactive visualization of university QS rankings: https://blog.csdn.net/m0_61168705/article/details/122699759?spm=1001.2014.3001.5501
Preface
I previously wrote code that crawls the QS university rankings and visualizes them with pygal, but its serial structure ran too slowly and made for a poor user experience. So I tried rewriting the code with multithreading, and I show the speedup visually.
1. Imported Libraries
Four more libraries are imported than in the previous article:
multiprocessing (its dummy submodule provides the thread pool; see the sketch after this list)
time (timing)
matplotlib (the classic plotting library)
random (random sampling)
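Despite the package name, the Pool used below comes from multiprocessing.dummy, which is backed by threads rather than processes but mirrors the process Pool's API. A minimal standalone sketch (not part of the article's code):

from multiprocessing.dummy import Pool  # thread pool with the multiprocessing.Pool API

with Pool(4) as pool:
    # map() blocks until every item has been handled by some worker thread
    squares = pool.map(lambda x: x * x, [1, 2, 3, 4])
print(squares)  # [1, 4, 9, 16]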
2. The Rewritten Code
This time the code comes first, followed by an explanation of why I handled it this way.
# This is a py file to crawl the ranks of your university in QS.
import requests
import re
import pygal
import time
from multiprocessing.dummy import Pool  # thread-backed Pool with the multiprocessing API

def getJson(url):
    # Fetch one year's ranking data and decode the JSON payload.
    r = requests.post(url, headers=headers).json()
    return r

def getSign(url):
    # Extract the numeric id embedded in the URL; it identifies the ranking year.
    sign = int(re.findall('https://www.qschina.cn/sites/default/files/qs-rankings-data/cn/(.*?)_indicators.txt', url)[0])
    return sign

def parse(data, sign):
    reg = re.compile('[\u4E00-\u9FA5]+')
    # Matches Chinese characters, so the university's name must be entered in Chinese for its QS rank to be found.
    count = 0
    for d in data['data']:
        try:
            uniName = re.search(reg, d['uni']).group()
            # If the entry's name is English-only, re.search() returns None and .group() raises AttributeError.
            if uniName in uName:
                dataDict[uniName][Info[sign]] = int(d['overall_rank'])
                count += 1
                if count == nOfUniversity:
                    break
        except (AttributeError, KeyError, ValueError):
            pass

def getuName():
    global inputTime
    inputStart = time.time()
    s = input("Enter the Chinese names of the universities to look up, separated by ',': ")
    inputEnd = time.time()
    inputTime = inputEnd - inputStart  # subtracted later so typing time is not counted as runtime
    L = s.split(',')
    return L

def drawLine():
    line = pygal.Line()
    for i in uName:
        line.add(i, dataDict[i])
    line.x_labels = year
    line.y_title = "QS Rank"
    line.x_title = "Year"
    line.title = "QS Rankings over the Last Four Years"
    line.legend_at_bottom = True
    line.render_to_file('查询结果.svg')
    # Before you run this file, change the line above to your own path.

def main():
    global uName
    uName = getuName()
    global nOfUniversity
    nOfUniversity = len(uName)
    global dataDict
    dataDict = {}
    for i in uName:
        dataDict[i] = [None, None, None, None]  # one slot per year; see Info below
    pool = Pool(4)  # four threads, one per yearly URL
    pool.map(lambda x: parse(getJson(x), getSign(x)), url)
    pool.close()
    pool.join()
    drawLine()

if __name__ == '__main__':
    startTime = time.time()
    url = ['https://www.qschina.cn/sites/default/files/qs-rankings-data/cn/397863_indicators.txt',
           'https://www.qschina.cn/sites/default/files/qs-rankings-data/cn/914824_indicators.txt',
           'https://www.qschina.cn/sites/default/files/qs-rankings-data/cn/2057712_indicators.txt',
           'https://www.qschina.cn/sites/default/files/qs-rankings-data/cn/2122636_indicators.txt',
           ]
    headers = {
        'user-agent': 'YOUR-USER-AGENT',  # fill in your own User-Agent string
    }
    year = [2019, 2020, 2021, 2022]
    Info = {397863: 0, 914824: 1, 2057712: 2, 2122636: 3}  # URL id -> year index
    main()
    endTime = time.time()
    runTime = endTime - startTime - inputTime
    print(runTime)  # total runtime, excluding the time spent typing input
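After a run, dataDict holds one four-slot list per university, indexed by year. With two schools queried it might look like this (the rank values are made-up placeholders, not real QS data):

dataDict = {
    '清华大学': [17, 16, 15, 17],   # illustrative values only
    '北京大学': [30, 22, 23, 18],
}
# slots 0-3 line up with year = [2019, 2020, 2021, 2022]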
My rewrite uses dataDict in place of the earlier D, which saves the final for loop that used to gather the data together.
The biggest bug I ran into came from the threads finishing at different times: each university's rankings stayed with the right school, but the years got shuffled out of order. That is why sign is needed to pin each year's data to a fixed position.
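A small standalone illustration of the mechanism (the rank value is made up): getSign() extracts the numeric id from each URL, and Info maps that id to a fixed slot of the four-element list, so a year's result always lands in the same position no matter which thread finishes first.

dataDict = {'清华大学': [None, None, None, None]}
Info = {397863: 0, 914824: 1, 2057712: 2, 2122636: 3}
sign = 914824                          # what getSign() returns for the 2020 URL
dataDict['清华大学'][Info[sign]] = 16  # illustrative rank; always lands in slot 1 = 2020
print(dataDict)                        # {'清华大学': [None, 16, None, None]}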
3. Visualizing the Runtime
Hitting the same URL many times in quick succession is an easy way to get your IP banned, so I test 10 groups in total (querying from 1 to 10 universities), draw 5 random samples per group, and average the times with the custom update() function.
To guarantee that every sampled university actually has data, I use the top 500 universities of the 2019 ranking as the sample space.
mainNormal() deliberately handles things differently from the first article's code so that the variables are controlled (only the multithreading part differs); a quick check of update()'s running mean follows.
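update() keeps a running mean in the list's last slot via L[-1] = (L[-1]*count + v) / (count + 1). A standalone check of that arithmetic:

L = []
for count, v in enumerate([2.0, 4.0, 6.0]):
    if count == 0:
        L.append(v)          # the first sample starts a new entry
    else:
        L[-1] = (L[-1] * count + v) / (count + 1)  # fold in the next sample
print(L)  # [4.0] -- the mean of 2.0, 4.0 and 6.0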
The test and visualization code is as follows:
import requests
import re
import time
import random
import matplotlib.pyplot as plt
from multiprocessing.dummy import Pool
import pygal

def getJson(url):
    r = requests.post(url, headers=headers).json()
    return r

def parseForName(data):
    # Build the sample space: the Chinese names of the top-500 universities of 2019.
    global name
    name = []
    reg = re.compile('[\u4E00-\u9FA5]+')
    for d in data['data'][:500]:
        try:
            name.append(re.search(reg, d['uni']).group())
        except AttributeError:
            # re.search() returned None: this entry has no Chinese name.
            pass

def getSign(url):
    sign = int(re.findall('https://www.qschina.cn/sites/default/files/qs-rankings-data/cn/(.*?)_indicators.txt', url)[0])
    return sign

def parse(data, sign):
    reg = re.compile('[\u4E00-\u9FA5]+')
    count = 0
    for d in data['data']:
        try:
            uniName = re.search(reg, d['uni']).group()
            if uniName in uName:
                dataDict[uniName][Info[sign]] = int(d['overall_rank'])
                count += 1
                if count == nOfUniversity:
                    break
        except (AttributeError, KeyError, ValueError):
            pass

def drawLine():
    line = pygal.Line()
    for i in uName:
        line.add(i, dataDict[i])
    line.x_labels = year
    line.y_title = "QS Rank"
    line.x_title = "Year"
    line.title = "QS Rankings over the Last Four Years"
    line.legend_at_bottom = True
    line.render_to_file('查询结果.svg')
    # Change the line above to your own path before running.

def mainPool():
    # Threaded version: the four yearly URLs are fetched and parsed in parallel.
    global nOfUniversity
    nOfUniversity = len(uName)
    global dataDict
    dataDict = {}
    for i in uName:
        dataDict[i] = [None, None, None, None]
    pool.map(lambda x: parse(getJson(x), getSign(x)), url)
    drawLine()

def mainNormal():
    # Serial version: identical except the URLs are processed one after another.
    global nOfUniversity
    nOfUniversity = len(uName)
    global dataDict
    dataDict = {}
    for i in uName:
        dataDict[i] = [None, None, None, None]
    for link in url:
        parse(getJson(link), getSign(link))
    drawLine()

def update(L, v, count):
    # Keep a running mean in L[-1]; count is how many samples it already holds.
    if count == 0:
        L.append(v)
    else:
        L[-1] = (L[-1] * count + v) / (count + 1)

def takeSamplesToTest(maxNum):
    global uName
    for n in range(1, maxNum + 1):
        for i in range(5):
            uName = random.sample(name, n)
            startTime = time.time()
            mainNormal()
            endTime = time.time()
            runTime = endTime - startTime
            update(timeListForNormal, runTime, i)
            startTime = time.time()
            mainPool()
            endTime = time.time()
            runTime = endTime - startTime
            update(timeListForPool, runTime, i)
        print(n)  # progress indicator

if __name__ == '__main__':
    headers = {
        'user-agent': 'YOUR-USER-AGENT',  # fill in your own User-Agent string
    }
    url = ['https://www.qschina.cn/sites/default/files/qs-rankings-data/cn/397863_indicators.txt',
           'https://www.qschina.cn/sites/default/files/qs-rankings-data/cn/914824_indicators.txt',
           'https://www.qschina.cn/sites/default/files/qs-rankings-data/cn/2057712_indicators.txt',
           'https://www.qschina.cn/sites/default/files/qs-rankings-data/cn/2122636_indicators.txt',
           ]
    year = [2019, 2020, 2021, 2022]
    Info = {397863: 0, 914824: 1, 2057712: 2, 2122636: 3}
    parseForName(getJson('https://www.qschina.cn/sites/default/files/qs-rankings-data/cn/397863_indicators.txt'))
    timeListForNormal = []
    timeListForPool = []
    pool = Pool(4)
    takeSamplesToTest(10)
    X = range(1, 11)
    fig, ax = plt.subplots()
    ax.plot(X, timeListForNormal, label='Normal')
    ax.plot(X, timeListForPool, label='Pool')
    ax.set_xlabel('Number of universities')
    ax.set_ylabel('time/s')
    ax.set_title('Serial vs. thread-pool runtime')
    ax.legend()
    plt.show()
4. Visualization Results
As the chart shows, the multithreaded version's runtime holds almost steady at about 2 s, a gap of roughly 5 s compared with the plain serial structure; its advantage is plain to see.
Summary
This article added multithreading to the crawler code from the previous post, improving its efficiency, and visualized the size of the improvement.