Contents
1. requests
    1.1 Introduction
    1.2 requests example
2. BeautifulSoup
    2.1 Introduction
    2.2 Installation
    2.3 BeautifulSoup example
3. selenium
    3.1 Introduction
    3.2 selenium example
4. Summary
1. requests

    1.1 Introduction

        You have probably heard of requests; it bills itself as "HTTP for Humans". Because its interfaces are concise and convenient, it has become the favorite tool of scraping enthusiasts (at least it is this author's favorite).

        Official documentation: Requests: HTTP for Humans — Requests 2.18.1 documentation
    1.2 requests example
import requests

'''
rqs.content is the raw response body as bytes, while rqs.text is the same body
decoded into a str (using rqs.encoding), so the two print almost identical
output -- only the type differs.
'''

def requests_url(url, headers):
    rqs = requests.get(url=url, headers=headers)
    if rqs.status_code == 200:
        print("Request succeeded")
        print("Page encoding:", rqs.encoding)
        print("content:", type(rqs.content))
        print("text:", type(rqs.text))
        print("Response headers:", rqs.headers)
        print("cookies:", rqs.cookies)
        # A 200 status code means the response came back fine; continue with other work here
    else:
        print("Request failed")

if __name__ == '__main__':
    # The URL to crawl
    url = 'https://v.qq.com/'
    # headers can carry many fields; press F12 in the browser to see what values the page sends
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36 Edg/97.0.1072.62',
        'cookie': '_ga=GA1.2.6371039.1615789955; eas_sid=e1e6s1o8y6P5n3q4m5u0e3t314; pgv_pvid=3404159900; RK=We4QlR07wA; ptcz=c41ecc5aa5596efb435144cc312f65129e0fe5c740d8030dff614a671f6d5f56; tvfe_boss_uuid=1e3ffcf4a9941f29; o_cookie=2289550339; pac_uid=1_2289550339; pgv_info=ssid=s7852532080; midas_openkey=@GUPuObMQG; midas_openid=2289550339; dgqqcomrouteLine=a20220221fxfd_a20220314download; ied_qq=o2289550339; rv2=805DE51BF9D936734F033F8A9F76E46E6CB77BD3B5C802DC08; property20=5764B79D9D63533983A5D15A47D0EEE1C2CB03478C5885C354BD1C873DBB23722F41156BF5228533; nyzxqqcomrouteLine=a20220317nyzx; tokenParams=%3FsOpenid%3D11853442815032565208%26sSign%3D0FD296824F479F17D3D0281056A6D648%26_wv%3D49952%26ADTAG%3Dtgi.qq.share.qq; ptag=cn_bing_com; video_platform=2; video_guid=910ab70e6bdcc45b; bucket_id=9231002; ts_refer=cn.bing.com/; ts_uid=9431210527; vversion_name=8.2.95; video_omgid=910ab70e6bdcc45b; acctype=pt; QQLivePCVer=50221312; qv_als=mobG4XYcttwicAiVA11649817068STaWZA==; ts_last=v.qq.com/x/cover/mzc002001uhk73a.html; ad_play_index=64'
    }
    # Call the crawl function
    requests_url(url, headers)
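To see the content/text difference from the docstring above concretely, here is a minimal sketch; httpbin.org is just a convenient public echo service, not part of the original example:

import requests

rqs = requests.get('https://httpbin.org/get')
print(type(rqs.content))  # <class 'bytes'> - the raw response body
print(type(rqs.text))     # <class 'str'>  - the same body decoded to text
print(rqs.content[:15])   # prints a bytes literal such as b'{\n  "args": {}'
print(rqs.text[:15])      # prints the same characters as a plain string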
2. BeautifulSoup

    2.1 Introduction

        To me the most powerful thing about BeautifulSoup is that it parses HTML into an object, branching out every tag, so you can reach the data inside an <li> tag with Object.div.p.ul.li. This makes it possible to grab exactly the resource you want when scraping a page.

        Official documentation: Beautiful Soup 4.4.0 documentation — Beautiful Soup 4.2.0 documentation (Chinese)
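A minimal sketch of this dot-access style on a hand-written snippet of HTML (the markup below is invented purely for illustration):

from bs4 import BeautifulSoup

html = '''
<div>
  <ul>
    <li><a href="/movie.html" title="Movie">Movie</a></li>
    <li><a href="/tv.html" title="TV">TV</a></li>
  </ul>
</div>
'''
soup = BeautifulSoup(html, 'html.parser')
print(soup.div.ul.li)              # the first <li> under the first <div><ul>
print(soup.div.ul.li.a['title'])   # -> Movie
print(soup.find(href="/tv.html"))  # attribute-based lookup works too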
    2.2 Installation
pip install bs4
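Note that bs4 on PyPI is just a shim that pulls in the real beautifulsoup4 package, so pip install beautifulsoup4 works equally well. And if you want BeautifulSoup to use the faster lxml parser instead of the built-in html.parser, lxml has to be installed separately:

pip install lxml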
    2.3 BeautifulSoup example
from bs4 import BeautifulSoup
import requests
import sys

def requests_url(url, headers):
    p4 = requests.get(url, headers=headers)
    soup = BeautifulSoup(p4.content, 'html.parser')
    print(type(soup.prettify()))  # <class 'str'>
    '''
    The page is parsed into a data object, so HTML tags can be reached with dot access
    '''
    print(soup.div.ul)
    print(soup.div.ul.find(href="/vod-type-id-3-pg-1.html"))
    print(soup.div.ul.li.a['title'])
    print(type(soup.div))  # <class 'bs4.element.Tag'>
    # Find every <a> tag in one pass
    print(soup.find_all('a'))

if __name__ == '__main__':
    url_list = ['https://4480.tv/vod-play-id-63360-src-3-num-{}.html'.format(i) for i in range(9, 16)]
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36 Edg/97.0.1072.62',
        'cookie': 'PHPSESSID=g65ipam1c0kgj8sctkcbfmkq35; __51vcke__JSfg3PPYsm2jAeAF=59365898-12ee-5c79-aee8-c07a733dec1f; __51vuft__JSfg3PPYsm2jAeAF=1649736012578; __51uvsct__JSfg3PPYsm2jAeAF=2; __vtins__JSfg3PPYsm2jAeAF=%7B%22sid%22%3A%20%22475e118d-15a5-55ef-8abd-75e4e54116f2%22%2C%20%22vd%22%3A%2020%2C%20%22stt%22%3A%202315702%2C%20%22dr%22%3A%20678768%2C%20%22expires%22%3A%201649765561094%2C%20%22ct%22%3A%201649763761094%7D'
    }
    for url in url_list:
        requests_url(url, headers)
        sys.exit()  # stops after the first page; remove this line to crawl the whole list
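Besides dot access and find/find_all, BeautifulSoup also understands CSS selectors via select(); a minimal sketch on made-up markup (not taken from the page above):

from bs4 import BeautifulSoup

html = '<div class="playlist"><a href="/ep1.html">EP1</a><a href="/ep2.html">EP2</a></div>'
soup = BeautifulSoup(html, 'html.parser')
# select() takes a CSS selector and returns a list of matching Tag objects
for a in soup.select('div.playlist a'):
    print(a['href'], a.get_text())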
3. selenium

    3.1 Introduction

        Selenium is a tool for testing web applications. Selenium tests run directly in the browser, exactly as if a real user were operating it. Supported browsers include IE (7, 8, 9, 10, 11), Mozilla Firefox, Safari, Google Chrome, Opera, Edge, and more.

        Official documentation: Selenium with Python — Selenium Python Bindings 2 documentation
    3.2 selenium example
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
import sys
import os

def parser_url(url):
    driver.get(url)
    time.sleep(2)  # give the page a moment to render
    # pwd = os.path.split(os.path.realpath(__file__))[0]
    # # Save a screenshot of the page
    # driver.save_screenshot(os.path.join(pwd, 'snapshot.png'))
    try:
        # aa = driver.find_elements(By.CSS_SELECTOR, ".dplayer-video.dplayer-video-current")
        # aa = driver.find_element(By.ID, "viewport")
        aa = driver.find_elements(By.TAG_NAME, "div")  # find every element whose tag is <div>
        # aa = driver.find_element(By.LINK_TEXT, '电影').get_property("href")
        # aa = driver.title
        print(aa)  # a list of WebElement objects
        for i in aa:
            print(i.text)
    except Exception:
        print("Something went wrong")
    else:
        # Close the browser and stop after the first page that parses cleanly
        driver.close()
        sys.exit()

if __name__ == '__main__':
    driver = webdriver.Chrome()
    url_list = ['https://4480.tv/vod-play-id-63360-src-3-num-{}.html'.format(i) for i in range(9, 16)]
    print(url_list)
    for url in url_list:
        # selenium drives a real browser, so request headers/cookies are not passed in
        # the way they are with requests
        parser_url(url)
'''
Example from the official documentation
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
driver = webdriver.Chrome()
driver.get("http://www.python.org")
assert "Python" in driver.title
elem = driver.find_element_by_name("q")
elem.clear()
elem.send_keys("pycon")
elem.send_keys(Keys.RETURN)
assert "No results found." not in driver.page_source
driver.close()
'''
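The find_element_by_name call in the official snippet above is the old Selenium 3 API, which was removed in Selenium 4; here is the same example rewritten in the By-based style used in the case above:

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys

driver = webdriver.Chrome()
driver.get("http://www.python.org")
assert "Python" in driver.title
elem = driver.find_element(By.NAME, "q")  # locate the search box by its name attribute
elem.clear()
elem.send_keys("pycon")
elem.send_keys(Keys.RETURN)
assert "No results found." not in driver.page_source
driver.close()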
4. Summary

    Besides the tools covered above, there are many more that can be used for crawling, such as urllib and urllib3. Since I am not very familiar with them yet, I will hold off on writing about them and update this post later.