re模块数据解析
re模块使用1
# re module usage 1: findall / finditer / search / compile.
import re

# findall: returns ALL non-overlapping matches at once, as a list of strings.
lst = re.findall(r"\d+", "电话是;13220677880 , 我的电话是:18715211308")
print(lst)  # prints the full result list directly

# finditer: returns an iterator of Match objects (memory-friendly for big input).
lt = re.finditer(r"\d+", "电话是;13220677880 , 我的电话是:18715211308")
for i in lt:  # iterator: matches come back one at a time
    print(i.group())

# search: scans the string and returns only the FIRST match (or None).
s = re.search(r"\d+", "电话是;13220677880 , 我的电话是:18715211308")
print(s.group())  # only one result is returned

# Pre-compile the pattern once so it can be reused without re-parsing.
obj = re.compile(r"\d+")
lst1 = obj.finditer("电话是;13220677880 , 我的电话是:18715211308")
for i in lst1:
    print(i.group())
re模块使用2
# re module usage 2: extract structured fields from HTML-like text
# using named capture groups (?P<name>...).
import re

S = """
<div class='aa'><span id='1'>刘浩伟</span></div>
<div class='bb'><span id='2'>马云a啊</span></div>
<div class='cc'><span id='3'>马化腾</span></div>
<div class='dd'><span id='4'>李彦宏</span></div>
<div class='ee'><span id='5'>张一鸣</span></div>
<div class='ff'><span id='6'>刘强东</span></div>
"""

# re.S makes '.' also match newlines; each (?P<name>...) group can later be
# retrieved by name instead of by positional index.
obj2 = re.compile(
    r"<div class='(?P<English>.*?)'><span id='(?P<id>\d+)'>(?P<name>.*?)</span></div>",
    re.S,
)
result2 = obj2.finditer(S)
for it in result2:
    # each Match exposes its named captures via group("<name>")
    print(it.group("English"))
    print(it.group("id"))
    print(it.group("name"))
电影天堂爬取1
# dytt8 crawl 1: home page -> "latest movies" list -> each detail page,
# printing film name and download link.
import requests
import re

domain = "https://www.dytt8.net"
# NOTE(review): verify=False disables TLS certificate verification — insecure;
# presumably needed because the site's certificate chain is broken. Confirm before reuse.
resp = requests.get(domain, verify=False)
resp.encoding = 'gb2312'  # the site is GB2312/GBK-encoded, not UTF-8

# obj1 isolates the <ul> that follows the "最新电影更新" heading; re.S lets '.' span newlines.
obj1 = re.compile(r"最新电影更新.*?<ul>(?P<name>.*?)</ul>", re.S)
# obj2 pulls the relative detail-page hrefs out of that section.
obj2 = re.compile(r"<a href='(?P<href>.*?)'", re.S)
# obj3 extracts film title and download link from a detail page.
obj3 = re.compile(r'◎片 名(?P<filmname>.*?)<br />.*?<a '
                  r'target="_blank" href="(?P<dowload>.*?)">')

result1 = obj1.finditer(resp.text)
film_list = []
for i in result1:
    name = i.group('name')
    result2 = obj2.finditer(name)
    for it in result2:
        # strip("/") removes any leading slash from the relative href, so the
        # separator must be re-inserted explicitly (otherwise the host and the
        # path would be glued together: "...nethtml/...").
        film = domain + "/" + it.group('href').strip("/")
        film_list.append(film)

for film in film_list:
    film_resp = requests.get(film, verify=False)
    film_resp.encoding = 'gb2312'
    result3 = obj3.search(film_resp.text)
    if result3 is None:
        # some detail pages use a different layout; skip instead of crashing
        continue
    print(result3.group("filmname"))
    print(result3.group("dowload"))

print("over!")
电影天堂爬取2
# dytt8 crawl 2: save the "latest movies" <ul> section of the home page to a file.
import requests
import re

domain = "https://www.dytt8.net"
# NOTE(review): verify=False disables TLS certificate verification — insecure.
resp = requests.get(domain, verify=False)
resp.encoding = 'gb2312'  # site is GB2312/GBK-encoded

# obj1 isolates the <ul> following the "最新电影更新" heading.
obj1 = re.compile(r"最新电影更新.*?<ul>(?P<name>.*?)</ul>", re.S)
# obj2 is compiled but never applied below — presumably kept for a later step.
obj2 = re.compile(r"<a href='(?P<href>.*?)'>.*?年(?P<filmname>.*?)</a><br/>", re.S)

result1 = obj1.finditer(resp.text)
film_list = []
for i in result1:
    name = i.group('name')
    # NOTE(review): mode "w" truncates the file on every match — if obj1 ever
    # matched more than once, only the last section would survive.
    with open("filmlist.txt", "w", encoding='gb2312') as f:
        f.write(domain + name)

print("over!")
bs4模块数据解析
bs4爬取图片
# bs4: scrape wallpaper images from netbian — list page -> detail page -> image file.
import requests
from bs4 import BeautifulSoup

url = "http://www.netbian.com/weimei/"
resp = requests.get(url)
resp.encoding = "gbk"  # the site is GBK-encoded

main_page = BeautifulSoup(resp.text, "html.parser")
# every thumbnail link <a> lives under <div class="list">
alist = main_page.find("div", class_="list").find_all("a")
for i in alist:
    # hrefs are site-relative, so prepend the host
    href = "http://www.netbian.com" + i.get('href')
    child_page_resp = requests.get(href)
    child_page_resp.encoding = "gbk"
    child_page = BeautifulSoup(child_page_resp.text, "html.parser")
    # the full-size <img> sits inside <div class="endpage">
    p = child_page.find("div", class_="endpage")
    img = p.find("img")
    src = img.get("src")
    img_resp = requests.get(src)
    img_name = src.split("/")[-1]  # use the last URL segment as the filename
    # assumes an "img/" directory already exists — TODO: create it if missing
    with open("img/" + img_name, mode="wb") as f:
        f.write(img_resp.content)  # .content is the raw bytes of the image
    print("over!!!", img_name)
xpath数据解析
xpath解析猪八戒
注意: Python 3.5 以后的版本使用 etree 时需要像下面的代码一样间接引用,不能再直接从 lxml 模块中引用;Python 3.5 以前的版本可以直接从 lxml 模块中引用。
# xpath: scrape service listings (price/title) from a zbj.com search page.
import requests
from lxml import html

# On Python 3.5+ etree is reached through lxml.html rather than imported
# directly from the lxml package.
etree = html.etree

url = "https://zibo.zbj.com/search/f/?kw=saas"
resp = requests.get(url)

# Parse into an element tree. Named `page` (not `html`) so the imported
# lxml.html module is not shadowed by the parsed document.
page = etree.HTML(resp.text)

# absolute path to the result cards on the search page (layout-fragile;
# breaks whenever the site changes its markup)
divs = page.xpath("/html/body/div[6]/div/div/div[2]/div[5]/div[1]/div")
for div in divs:
    # relative xpaths evaluated from each card
    price = div.xpath("./div/div/a/div[2]/div[1]/span[1]/text()")
    title = div.xpath("./div/div/a/div[2]/div[2]/p/text()")
    print(title)
|