Douban Top 250 Movies
They say everyone's first scraping exercise starts here?
import requests
import re
from bs4 import BeautifulSoup

header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36 Edg/95.0.1020.30',
    'Host': 'movie.douban.com'
}

# The Top 250 list is paginated 25 films per page, so walk all 10 pages.
for i in range(10):
    r = requests.get('https://movie.douban.com/top250?start=' + str(i * 25), headers=header)
    # Capture just the Chinese title inside each <span class="title"> tag;
    # the capture group replaces the fragile hand-counted slice j[20:-7].
    for title in re.findall('<span class="title">([\u4E00-\u9FFF]+)</span>', r.text):
        print(title)
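Since BeautifulSoup is already imported, the same titles can also be read from the parse tree instead of matched by regex. A minimal sketch for a single page, assuming the page keeps the <span class="title"> markup the regex above relies on (each film appears to carry a second title span holding a '/'-separated foreign title, which is filtered out here):

r = requests.get('https://movie.douban.com/top250', headers=header)
soup = BeautifulSoup(r.text, 'html.parser')
for span in soup.select('span.title'):
    text = span.get_text()
    if '/' not in text:  # skip the second " / foreign title" span per film
        print(text)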
The request headers are what get the script past Douban's anti-scraper check, and [\u4E00-\u9FFF]+ is the regex for matching a run of Chinese characters (the CJK Unified Ideographs block).
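A quick check of that character class on a mixed-language string (the sample title is only an illustration):

print(re.findall('[\u4E00-\u9FFF]+', '肖申克的救赎 The Shawshank Redemption'))
# prints ['肖申克的救赎']; the Latin words fall outside the CJK range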
Scraping Animal Farm from a novel site, saving each chapter as its own txt file
import re
import requests

# Helper: create one txt file per chapter and write the chapter text into it.
def write(name, text):
    # 'w' instead of 'x', so re-running the script overwrites old files
    # instead of crashing; an explicit encoding keeps the output portable.
    with open(name + '.txt', 'w', encoding='utf-8') as f:
        f.write(text)

r = requests.get('https://www.kanunu8.com/book3/6879/')
r.encoding = 'gbk'  # the site serves GBK-family text; use one encoding throughout

# Grab the chapter list: everything between "正文" and the closing </tbody>.
mainBody = re.findall('正文(.*?)</tbody>', r.text, re.S)
links = re.findall(r'<a href="(\d+\.html)">([\u4E00-\u9FFF]+)</a>', mainBody[0])

# Build the chapter-name -> relative-link dictionary straight from the groups.
linkDic = {name: where for where, name in links}

# Fetch each chapter page, keep its body text, and save it as a txt file.
mainText = {}
for name in linkDic:
    rr = requests.get('https://www.kanunu8.com/book3/6879/' + linkDic[name])
    rr.encoding = 'gbk'
    match = re.search('<p>(.*?)</p>', rr.text, re.S)
    if match is None:  # skip a chapter whose markup differs
        continue
    paragraph = match.group(1).replace('<br />', '')
    mainText[name] = paragraph
    write(name, paragraph)
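Running this against all the chapters fires the requests back-to-back; a hedged sketch of a small pacing helper (the fetch name and its delay parameter are mine, not from the original):

import time

def fetch(url, encoding='gbk', delay=1.0):
    # Hypothetical helper: one GET with a fixed encoding and a polite pause,
    # failing fast on HTTP errors instead of parsing an error page.
    resp = requests.get(url)
    resp.raise_for_status()
    resp.encoding = encoding
    time.sleep(delay)
    return resp.text

Inside the chapter loop, the two rr lines would then collapse to text = fetch('https://www.kanunu8.com/book3/6879/' + linkDic[name]), with the <p> search running on text.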