Import the required packages
import requests
from bs4 import BeautifulSoup
import pandas as pd
import pprint
import json
① Download the HTML of the 10 list pages

# Douban Top 250 spreads 250 entries over 10 pages, 25 per page: start = 0, 25, ..., 225
page_indexs = range(0, 250, 25)

def download_all_htmls():
    # Download the HTML of every list page, for later parsing
    htmls = []
    for idx in page_indexs:
        url = f"https://movie.douban.com/top250?start={idx}&filter="
        print("crawl html:", url)
        r = requests.get(
            url,
            headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'}
        )
        if r.status_code != 200:
            raise Exception(f"bad status {r.status_code} for {url}")
        htmls.append(r.text)
    return htmls

htmls = download_all_htmls()  # run the crawl
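Douban tends to refuse clients that fire requests too quickly, so a short pause between pages is safer. A minimal sketch of a more polite download loop, assuming the same page_indexs and User-Agent as above (the function name download_all_htmls_politely and the one-second delay are arbitrary choices):

import time

def download_all_htmls_politely(delay_seconds=1.0):
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'}
    htmls = []
    for idx in page_indexs:
        url = f"https://movie.douban.com/top250?start={idx}&filter="
        r = requests.get(url, headers=headers)
        r.raise_for_status()       # fail loudly on 4xx/5xx instead of saving an error page
        htmls.append(r.text)
        time.sleep(delay_seconds)  # brief pause so we do not hammer the server
    return htmls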
② Parse the crawled HTML pages to extract the data

def parse_single_html(html):
    # Parse a single list page and return its movie entries
    soup = BeautifulSoup(html, 'html.parser')
    article_items = (
        soup.find("div", class_="article")
            .find("ol", class_="grid_view")
            .find_all("div", class_="item")
    )
    datas = []
    for article_item in article_items:
        rank = article_item.find("div", class_="pic").find("em").get_text()
        info = article_item.find("div", class_="info")
        title = info.find("div", class_="hd").find("span", class_="title").get_text()
        stars = (
            info.find("div", class_="bd")
                .find("div", class_="star")
                .find_all("span")
        )
        rating_star = stars[0]["class"][0]  # CSS class such as "rating5-t"
        rating_num = stars[1].get_text()    # numeric score, e.g. "9.7"
        comments = stars[3].get_text()      # vote count, e.g. "1234567人评价"
        datas.append({
            "rank": rank,
            "title": title,
            "rating_star": rating_star.replace("rating", "").replace("-t", ""),  # keep only the star level
            "rating_num": rating_num,
            "comments": comments.replace("人评价", "")  # keep only the number
        })
    return datas

pprint.pprint(parse_single_html(htmls[0]))  # preview the parsed data of the first page
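A quick way to confirm the selectors still match the page layout is to check that each list page yields exactly 25 records with the expected keys; a minimal sanity-check sketch, assuming the function above:

sample = parse_single_html(htmls[0])
assert len(sample) == 25, "each Top 250 list page should contain 25 movies"
assert set(sample[0]) == {"rank", "title", "rating_star", "rating_num", "comments"}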
all_datas = []  # collect the parsed entries of every page
for html in htmls:
    all_datas.extend(parse_single_html(html))
all_datas
len(all_datas)  # 10 pages x 25 entries = 250
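The json module imported at the top is not used anywhere else; if you also want to keep the raw records on disk, a small sketch (the file name douban_top250.json is an arbitrary choice):

# Save the raw parsed records as JSON; ensure_ascii=False keeps the Chinese titles readable
with open("douban_top250.json", "w", encoding="utf-8") as f:
    json.dump(all_datas, f, ensure_ascii=False, indent=2)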
③ Write the results to an Excel file
df = pd.DataFrame(all_datas)
df
df.to_excel("豆瓣TOP250.xlsx")
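df.to_excel needs an Excel engine such as openpyxl installed (pip install openpyxl). If you plan to sort or filter on the scores afterwards, it may also help to convert the string columns to numbers first; a small sketch, assuming the column names produced above:

df["rating_num"] = pd.to_numeric(df["rating_num"])               # score as a float, e.g. 9.7
df["comments"] = pd.to_numeric(df["comments"], errors="coerce")  # vote count as a number
df.sort_values("rating_num", ascending=False).head(10)           # ten highest-rated entries
df.to_excel("豆瓣TOP250.xlsx", index=False)                       # alternative export without the index column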