直接上代码。爬虫这东西，话说太多审核过不了啊~
import requests
import re
import csv
# Scrape the Douban Top 250 movie chart and append one CSV row per film
# (name, year, score, rating count). The chart is paginated 25 items at a
# time via the ?start= query parameter: 0, 25, ..., 225.

# Browser-like User-Agent: Douban serves a blocked/empty page to the
# default python-requests UA, so we must spoof a real browser.
request_head = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                  "Chrome/96.0.4664.93 Safari/537.36"
}

# Compile the pattern once, outside the page loop. re.S lets '.' match
# newlines so a single pattern can span each multi-line <li> item.
# NOTE: between <br> and the year the HTML carries a run of whitespace,
# hence the .strip() on "year" below; the other groups are tightly
# anchored on both sides and need no stripping.
analysis = re.compile(r'<li>.*?<div class="item">.*?<span class="title">(?P<name>.*?)'
                      r'</span>.*?<div class="bd">.*?<br>(?P<year>.*?) .*?<div class="star">'
                      r'.*?<span class="rating_num" property="v:average">(?P<score>.*?)</span>'
                      r'.*?<span>(?P<evaluators_people>.*?)</span>', re.S)

# Open the CSV once for the whole run:
#   newline=""        -- required by the csv module; without it every row is
#                        followed by a blank line on Windows.
#   encoding="utf-8-sig" -- UTF-8 with BOM so Excel detects the encoding and
#                        Chinese text is not garbled (the usual gbk/utf-8
#                        mojibake problem when opening the file in Excel).
with open("top_250.csv", "a", newline="", encoding="utf-8-sig") as top_250:
    top_250csv = csv.writer(top_250)
    for pages in range(0, 250, 25):
        douban_top250_url = f'https://movie.douban.com/top250?start={pages}&filter='
        resp_source_code = requests.get(url=douban_top250_url, headers=request_head)
        # Fail fast on a blocked / errored response instead of silently
        # writing nothing for this page.
        resp_source_code.raise_for_status()

        for match in analysis.finditer(resp_source_code.text):
            top = match.groupdict()
            top["year"] = top["year"].strip() + "年"
            top["score"] = top["score"] + "分"
            top["name"] = "片名:" + top["name"]
            top_250csv.writerow(top.values())

        # Progress message (fixed typo: 排行耪 -> 排行榜).
        print(f"正在爬取排行榜{pages+1}-{pages+25}页")

# Encoding requests detected for the last fetched page (diagnostic only).
print(resp_source_code.apparent_encoding)
详细解释可以看我那篇 dytt 爬虫文章，思路都差不多。
运行结果（截图略）：CSV 中会逐行写入片名、年份、评分和评价人数，后面还有好多好多条记录~