Code first:
# encoding:utf-8
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import time
import json
import requests
# pip install selenium==3.141.0
# Keep selenium at 3.141.0: with newer versions the chained
# find_element(...).find_elements(...) calls below raise errors.
# Enable Chrome's performance log so network requests can be inspected later.
d = DesiredCapabilities.CHROME
d['loggingPrefs'] = {'performance': 'ALL'}
chrome_options = Options()
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('--no-sandbox')
# Required for get_log('performance') to return anything (see note 2 below).
chrome_options.add_experimental_option('w3c', False)
driver = webdriver.Chrome(desired_capabilities=d, options=chrome_options)
driver.get("http://www.ishuyin.com/show.php?id=15626#liebiao")
# Collect the link of every episode from the list page.
links = driver.find_element(By.CLASS_NAME, "box").find_elements(By.TAG_NAME, "a")
count = 2
hrefs = []
for link in links:
    hrefs.append(link.get_attribute("href"))

for href in hrefs:
    driver.get(href)
    # The episode title lives in the .bread element; drop its first four characters.
    name = driver.find_element(By.CLASS_NAME, "bread").get_attribute("innerHTML")[4:]
    name = "第" + str(count) + "集 " + name   # prefix the filename with "第N集" (Episode N)
    count += 1
    driver.find_element(By.ID, "play").click()
    time.sleep(3)   # give the player a moment to request the mp3 before reading the log
    urls = []
    for log in driver.get_log('performance'):
        log_entry = json.loads(log['message'])
        try:
            # Skip data: (base64) references and Document (page) requests.
            if "data:" not in log_entry['message']['params']['request']['url'] and 'Document' not in log_entry['message']['params']['type']:
                urls.append(log_entry['message']['params']['request']['url'])
        except Exception:
            pass   # not every log entry describes a network request
    for url in urls:
        if url.endswith(".mp3"):
            print("Downloading " + name)
            down_res = requests.get(url=url)
            with open(name + ".mp3", "wb") as file:
                file.write(down_res.content)
            print(name + " done")
            break

input()   # keep the browser open until Enter is pressed
driver.quit()
OK, now three notes:
First: with selenium 4, chaining find_element or find_elements calls back to back raises errors. It is a version problem with no real workaround, but the fix is simple: downgrade to selenium 3.141.0.
pip uninstall selenium
pip install selenium==3.141.0
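If you want to be sure the right version is actually the one being imported, a quick guard at the top of the script will catch it. This assert is only an illustrative safeguard, not part of the original script:

import selenium
# Fail fast if a selenium 4.x install sneaks back in.
assert selenium.__version__.startswith("3.141"), \
    "this script expects selenium 3.141.x for the chained find_element/find_elements calls"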
Second: to read Chrome's performance log (the DevTools network data), you must add chrome_options.add_experimental_option('w3c', False) on top of the loggingPrefs capability.
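For reference, a minimal, self-contained sketch of just the logging setup, assuming selenium 3.141.0 and a matching chromedriver on PATH (the URL below is only a placeholder):

from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import json

caps = DesiredCapabilities.CHROME
caps['loggingPrefs'] = {'performance': 'ALL'}   # newer chromedriver builds may expect 'goog:loggingPrefs' instead
opts = Options()
opts.add_experimental_option('w3c', False)      # without this, get_log('performance') comes back empty
drv = webdriver.Chrome(desired_capabilities=caps, options=opts)
drv.get("https://example.com")                  # placeholder page
for entry in drv.get_log('performance'):
    # Each entry['message'] is a JSON string describing one DevTools event.
    event = json.loads(entry['message'])['message']
    print(event['method'])                      # e.g. Network.requestWillBeSent
drv.quit()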
Third: once the URLs have been pulled out of the log, it is better not to act on them right away, since some odd exceptions can pop up mid-iteration. Store them in a list first, then run a second pass over that list to do the actual work. With this little data, the extra pass costs nothing.
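A sketch of that collect-then-process pattern, reusing the driver from the script above (the output filename here is only a placeholder):

import json
import requests

snapshot = driver.get_log('performance')  # take one snapshot; reading the log consumes it
candidate_urls = []
for entry in snapshot:
    payload = json.loads(entry['message'])
    try:
        candidate_urls.append(payload['message']['params']['request']['url'])
    except KeyError:
        continue  # not every DevTools event carries a request
# Second pass: only now touch the network, outside the log iteration.
for u in candidate_urls:
    if u.endswith(".mp3"):
        resp = requests.get(u)
        with open("episode.mp3", "wb") as f:  # placeholder filename
            f.write(resp.content)
        break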