I recently tested a basic crawler that scrapes 200 Baidu Baike entries; the code is below for anyone who wants it. The five modules live in a package named firstspider, and if you copy them into PyCharm the code runs as-is. Feel free to message me with any questions (or WeChat: jlm0314). URL manager (UrlManager.py), the code is as follows:
class Urlmanager(object):
    def __init__(self):
        self.new_urls = set()   # URLs waiting to be crawled
        self.old_urls = set()   # URLs that have already been crawled

    def has_new_url(self):
        '''
        Check whether there are any uncrawled URLs left
        :return:
        '''
        return self.new_url_size() != 0

    def get_new_url(self):
        '''
        Take one uncrawled URL and move it to the crawled set
        :return:
        '''
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url

    def add_new_url(self, url):
        '''
        Add a single new URL to the uncrawled set
        :param url: a single URL
        :return:
        '''
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        '''
        Add a batch of new URLs to the uncrawled set
        :param urls: an iterable of URLs
        :return:
        '''
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    def new_url_size(self):
        '''
        Size of the uncrawled URL set
        :return:
        '''
        return len(self.new_urls)

    def old_url_size(self):
        '''
        Size of the crawled URL set
        :return:
        '''
        return len(self.old_urls)
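If you want to sanity-check the URL manager on its own before wiring everything together, a minimal sketch like the one below (the entry URLs are made up for illustration) shows the deduplication behaviour: a URL that is already queued, or that has already been crawled, is silently ignored.

from firstspider.UrlManager import Urlmanager

manager = Urlmanager()
manager.add_new_url('https://baike.baidu.com/item/A/1')
manager.add_new_urls(['https://baike.baidu.com/item/A/1',    # duplicate, ignored
                      'https://baike.baidu.com/item/B/2'])
print(manager.new_url_size())                            # 2
url = manager.get_new_url()                              # moves one URL into old_urls
manager.add_new_url(url)                                  # already crawled, not re-queued
print(manager.new_url_size(), manager.old_url_size())     # 1 1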
HTML downloader (HtmlDownloader.py), the code is as follows:
import requests

class HtmlDownloader(object):
    def download(self, url):
        if url is None:
            return None
        user_agent = 'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36'
        headers = {'User-Agent': user_agent}
        # A timeout keeps the crawler from hanging indefinitely on a slow response
        r = requests.get(url, headers=headers, timeout=10)
        if r.status_code == 200:
            r.encoding = 'utf-8'
            return r.text
        return None
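As a quick standalone check of the downloader, something like this (using the same seed entry as the scheduler further down) fetches one page and reports how much text came back; download returns None on a non-200 status or a missing URL.

from firstspider.HtmlDownloader import HtmlDownloader

downloader = HtmlDownloader()
html = downloader.download('https://baike.baidu.com/item/%E9%BE%99%E5%85%89%E9%9B%86%E5%9B%A2/1137665')
if html is None:
    print('download failed (non-200 status or bad URL)')
else:
    print('downloaded %d characters' % len(html))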
HTML parser (HtmlParser.py), the code is as follows:
import re
from urllib.parse import urljoin
from bs4 import BeautifulSoup

class HtmlParser(object):
    def parser(self, page_url, html_cont):
        '''
        Parse the page content, extracting new URLs and the entry's data
        :param page_url: URL of the downloaded page
        :param html_cont: downloaded page content
        :return: the new URLs and the data
        '''
        if page_url is None or html_cont is None:
            return
        soup = BeautifulSoup(html_cont, 'html.parser')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data

    def _get_new_urls(self, page_url, soup):
        '''
        Extract the set of new URLs
        :param page_url: URL of the downloaded page
        :param soup: soup
        :return: the set of new URLs
        '''
        new_urls = set()
        # Entry links look like /item/<percent-encoded name>/<numeric id>
        links = soup.find_all('a', href=re.compile(r'^/item/(?:%\w{2}|\w)+/\d+'))
        for link in links:
            new_url = link['href']
            new_full_url = urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        '''
        Extract the useful data (title and summary)
        :param page_url: URL of the downloaded page
        :param soup:
        :return: the extracted data
        '''
        data = {}
        data['url'] = page_url
        # These class names match Baidu Baike's markup at the time of writing; if the
        # layout changes, find() returns None and the lookups below raise, which the
        # scheduler's try/except treats as a failed crawl
        title = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h1')
        data['title'] = title.get_text()
        summary = soup.find('div', class_='lemma-summary')
        data['summary'] = summary.get_text()
        return data
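The hrefs picked up by _get_new_urls are relative (they start with /item/...), which is why they are run through urljoin before being queued; a small illustration with a made-up entry link:

from urllib.parse import urljoin

page_url = 'https://baike.baidu.com/item/%E9%BE%99%E5%85%89%E9%9B%86%E5%9B%A2/1137665'
print(urljoin(page_url, '/item/%E6%B7%B1%E5%9C%B3/140588'))
# -> https://baike.baidu.com/item/%E6%B7%B1%E5%9C%B3/140588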
Data output (DataOutput.py), the code is as follows:
import codecs

class DataOutput(object):
    def __init__(self):
        self.datas = []

    def store_data(self, data):
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        fout = codecs.open('baike.html', 'w', encoding='utf-8')
        fout.write("<html>")
        fout.write("<body>")
        fout.write("<table>")
        for data in self.datas:
            fout.write("<tr>")
            fout.write("<td>%s</td>" % data['url'])
            fout.write("<td>%s</td>" % data['title'])
            fout.write("<td>%s</td>" % data['summary'])
            fout.write("</tr>")
        # Clear the buffer after the loop; removing items while iterating
        # would skip every other record
        self.datas = []
        fout.write("</table>")
        fout.write("</body>")
        fout.write("</html>")
        fout.close()
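To see what output_html produces without running a full crawl, you can feed it a hand-made record (the field values below are placeholders); it writes baike.html into the current working directory, one table row per stored entry.

from firstspider.DataOutput import DataOutput

out = DataOutput()
out.store_data({'url': 'https://baike.baidu.com/item/Example/1',
                'title': 'Example title',
                'summary': 'Example summary text.'})
out.output_html()   # creates baike.html next to the script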
Crawler scheduler (SpiderMan.py), the code is as follows:
from firstspider.UrlManager import Urlmanager
from firstspider.HtmlDownloader import HtmlDownloader
from firstspider.HtmlParser import HtmlParser
from firstspider.DataOutput import DataOutput

class SpiderMan(object):
    def __init__(self):
        self.manager = Urlmanager()
        self.downloader = HtmlDownloader()
        self.parser = HtmlParser()
        self.output = DataOutput()

    def crawl(self, root_url):
        self.manager.add_new_url(root_url)
        # Keep crawling while the frontier is non-empty and fewer than 200 pages are done
        while self.manager.has_new_url() and self.manager.old_url_size() < 200:
            try:
                new_url = self.manager.get_new_url()
                html = self.downloader.download(new_url)
                new_urls, data = self.parser.parser(new_url, html)
                self.manager.add_new_urls(new_urls)
                self.output.store_data(data)
                print('already crawled %s links' % self.manager.old_url_size())
            except Exception as e:
                print('crawl failed:', e)
        self.output.output_html()

if __name__ == "__main__":
    spider_man = SpiderMan()
    spider_man.crawl("https://baike.baidu.com/item/%E9%BE%99%E5%85%89%E9%9B%86%E5%9B%A2/1137665?fr=aladdin.html")
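Running SpiderMan.py directly starts from the seed entry above and keeps going until 200 pages have been crawled or the URL frontier is empty, printing its progress along the way; the collected titles and summaries end up in baike.html in the working directory.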