Preparation
Why scrape Ultraman? I don't think that needs any explanation from me~~~~ First, prepare a URL. I'm sharing one of my own resources here as a little gift for everyone -->> the Ultraman url
Code Implementation
The rough idea, to help you follow the code: one main URL is scraped to get every Ultraman series, and then every episode of each series is scraped from its own page. Everyone should already be familiar with requests, so here I will just introduce how to use xpath.
Using xpath
Find the data you want to scrape on the page, then copy its xpath (from the browser's developer tools). Here xpath plays the same role as a regular expression, but it is much simpler to use than a regular expression.
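As a quick illustration before the full spider, here is a minimal, self-contained sketch of that copy-an-xpath workflow with lxml. The HTML snippet and the paths in it are made up for demonstration; they are not taken from the target site:

from lxml import etree

# A made-up snippet standing in for a downloaded page
text = """
<ul>
  <li><h4><a href="/voddetail/1.html">泰罗奥特曼</a></h4></li>
  <li><h4><a href="/voddetail/2.html">迪迦奥特曼</a></h4></li>
</ul>
"""

html = etree.HTML(text)
# @href grabs the attribute value, text() grabs the link text
hrefs = html.xpath('//ul/li/h4/a/@href')
names = html.xpath('//ul/li/h4/a/text()')
print(dict(zip(names, hrefs)))
# {'泰罗奥特曼': '/voddetail/1.html', '迪迦奥特曼': '/voddetail/2.html'}

The spider below uses exactly this pattern, just with the absolute xpaths copied from the browser.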
import os
import time

import requests
from lxml import etree


class Spider:
    def __init__(self):
        # Search-results page for "奥特曼" (Ultraman) on tv.ci
        self.url = "https://tv.ci/sb/ke7nhZe3c1-.html?wd=%E5%A5%A5%E7%89%B9%E6%9B%BC&submit="
        self.headers = {
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36"}

    def get_all_html(self):
        # Fetch the search-results page, retrying until the request succeeds
        count = 1
        while True:
            try:
                print(f"Main URL, request #{count}")
                count += 1
                response = requests.get(self.url, headers=self.headers, timeout=5)
            except Exception:
                time.sleep(3)
            else:
                text = response.content.decode()
                return text

    def get_all_url(self, text):
        # Extract every series link and title from the results list
        html = etree.HTML(text)
        href = html.xpath('/html/body/div[1]/div/div/div[1]/div/div/div[2]/ul/li[*]/div[2]/h4/a/@href')
        name = html.xpath('/html/body/div[1]/div/div/div[1]/div/div/div[2]/ul/li[*]/div[2]/h4/a/text()')
        # Map series name -> absolute URL (the hrefs are site-relative)
        dict_text = dict(zip(name, [f"https://tv.ci/{i}" for i in href]))
        print("All series URLs scraped")
        return dict_text

    def get_html(self, name, url):
        # Fetch one series page, retrying until the request succeeds
        count = 1
        while True:
            try:
                print(f"{name}, request #{count}")
                count += 1
                response = requests.get(url, headers=self.headers, timeout=5)
            except Exception:
                time.sleep(3)
            else:
                text = response.content.decode()
                return text

    def get_url(self, text):
        # Extract every episode link and title from a series page
        html = etree.HTML(text)
        href = html.xpath('/html/body/div[1]/div/div[1]/div[3]/div/div[2]/div[1]/ul/li[*]/a/@href')
        name = html.xpath('/html/body/div[1]/div/div[1]/div[3]/div/div[2]/div[1]/ul/li[*]/a/text()')
        dict_text = dict(zip(name, [f"https://tv.ci/{i}" for i in href]))
        return dict_text

    def run(self):
        url_text = ""
        all_html = self.get_all_html()
        dict_all_url = self.get_all_url(all_html)
        for name, url in dict_all_url.items():
            html = self.get_html(name, url)
            dict_url = self.get_url(html)
            url_text += name + "\n" + str(dict_url) + "\n"
            print(f"{name} scraped")
        # Make sure the output directory exists before writing
        os.makedirs("./00文件夹/url", exist_ok=True)
        with open("./00文件夹/url/奥特曼.txt", "w", encoding="utf-8") as f:
            f.write(url_text)


if __name__ == '__main__':
    spider = Spider()
    spider.run()
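Since run() writes each series name followed by the Python repr of its episode dict, the saved file can be loaded straight back for the next step. Here is a small sketch of my own (not part of the original post) using ast.literal_eval from the standard library to parse the dict reprs; the file path matches the one written above:

import ast

# Lines in the file alternate between a series name and the
# repr of its {episode: url} dict
with open("./00文件夹/url/奥特曼.txt", encoding="utf-8") as f:
    lines = [line.rstrip("\n") for line in f if line.strip()]

series = {}
for name, dict_repr in zip(lines[::2], lines[1::2]):
    # literal_eval safely turns the dict repr back into a dict
    series[name] = ast.literal_eval(dict_repr)

print(list(series)[:3])  # first few series names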
Notes
So far all of the Ultraman urls have been scraped; the only step left is capturing the video packets (you can also just click the links and watch directly). I haven't found the packets carrying the video yet; I'll update this as soon as I do.