Python3 批量下载并保存PDF文件
"""Batch-download every PDF linked from a listing page and save locally."""
from urllib.request import urlopen
from urllib.parse import urljoin
from bs4 import BeautifulSoup
import requests

url = 'PDF列表地址'  # placeholder: URL of the page that lists the PDF links
html = urlopen(url)
bs = BeautifulSoup(html, 'html.parser')

# find_all() is the current bs4 API; findAll() is a deprecated camelCase alias.
linkList = bs.find_all('a', {'class': 'classname'})

# Collect href targets, skipping anchors without an href and resolving
# relative links against the listing URL so requests.get() always sees
# an absolute URL.
links = []
for link in linkList:
    href = link.get('href')
    if href:
        links.append(urljoin(url, href))

# Save each PDF under an incrementing local file name.
i = 1
for link in links:
    localfile = f'D:/papers/paper_{i}.pdf'
    urlfile = requests.get(link)
    # 'wb' suffices — we only write; the original 'wb+' read mode was unused.
    with open(localfile, 'wb') as f:
        f.write(urlfile.content)
    i += 1
下载过程慢,以后有时间再做优化。
处理过程中发现程序总是在请求过程中中断,因此改为在每次请求之前等待两秒。
# The server appears to drop rapid-fire requests (downloads kept aborting
# mid-run), so pause two seconds before each request to throttle the loop.
import time

# NOTE: continues with the `links` list and counter `i` defined above.
for link in links:
    localfile = f'D:/papers/paper_{i}.pdf'
    time.sleep(2)  # throttle: wait before each request to avoid interruption
    urlfile = requests.get(link)
    # 'wb' suffices — we only write; the original 'wb+' read mode was unused.
    with open(localfile, 'wb') as f:
        f.write(urlfile.content)
    i += 1