网络爬虫基础4
以思维导图的方式记述爬虫学习基础,没有详细的信息,只有一些要点!
前言
记述了requests的使用,以及如何借助Python标准库multiprocessing.dummy提供的线程池(Pool)来实现多线程爬虫,并实现一个爬虫案例。
思维导图
代码区
import requests
import re
''' 4.1 使用Python获取网页的源代码 '''
def get():
    """Fetch the exercise page with a plain GET and print its decoded HTML."""
    url = "http://exercise.kingname.info/exercise_requests_get.html"
    page = requests.get(url).content.decode()
    print(page, '\n')
'''
1.构建字典data,给各个元素赋具体的值
2.使用formdata或者json提交数据,参数为name和password
'''
def post():
    """POST the same credentials twice — once as form data, once as a JSON
    body — and print both decoded responses for comparison."""
    url = "http://exercise.kingname.info/exercise_requests_post"
    payload = {'name': '1', 'password': '2333'}
    # data= sends application/x-www-form-urlencoded; json= sends application/json.
    as_formdata = requests.post(url, data=payload).content.decode()
    as_json = requests.post(url, json=payload).content.decode()
    print(as_formdata)
    print(as_json)
def requests_re():
    """POST to the exercise page, then extract the title and paragraph
    text from the returned HTML with regular expressions."""
    payload = {'name': '1', 'password': '2333'}
    page = requests.post(
        "http://exercise.kingname.info/exercise_requests_post", data=payload
    ).content.decode()
    # re.S lets '.' cross newlines inside the HTML.
    title = re.search('title>(.*?)<', page, re.S).group(1)
    paragraphs = re.findall('p>(.*?)<', page, re.S)
    print("页面标题是:", title)
    print(paragraphs)
    print("页面内容是:", ''.join(paragraphs))
def requests_re_baidu():
    """Fetch the Baidu homepage and print the first <script> body plus
    every http-equiv attribute value found in the page."""
    page = requests.get("https://www.baidu.com").content.decode()
    first_script = re.search('script>(.*?)</script', page, re.S).group(1)
    http_equivs = re.findall('http-equiv=(.*?) content', page, re.S)
    print("script:\n", first_script)
    print('\n'.join(http_equivs))
''' 4.2 多线程爬虫 '''
from multiprocessing.dummy import Pool
def duoxiancheng_shiyon():
    """Demo: square the numbers 0-9 in parallel with a 3-thread pool.

    Returns:
        The list of squares [0, 1, 4, ..., 81], in input order, so callers
        can verify the result (the original only printed it).
    """
    def calc_power2(num):
        # Work item executed by each pool thread.
        return num * num

    # A list keeps the input order deterministic; the original used a set
    # comprehension, whose iteration order is not guaranteed by the language.
    origin_num = list(range(10))
    # The context manager terminates the pool's threads instead of leaking
    # them (the original never closed the pool).
    with Pool(3) as pool:
        result = pool.map(calc_power2, origin_num)
    print(f'计算0-9的平方分别为:{result}')
    return result
def duoxiancheng1():
    """Fire 100 GET requests at baidu.com through a 5-thread pool.

    Pure I/O demo: responses are discarded and nothing is returned.
    """
    from multiprocessing.dummy import Pool

    def query(url):
        # Each thread simply performs the request; the response is ignored.
        requests.get(url)

    # 100 copies of the same URL — list multiplication replaces the original
    # append loop (and the stray C-style semicolons are gone).
    url_list = ['https://baidu.com'] * 100
    # Context manager terminates the pool's threads; the original leaked them.
    with Pool(5) as pool:
        pool.map(query, url_list)
爬虫案例
import re
import os
import requests
# Base URL of the book on kanunu8; chapter links are relative to it.
start_url = 'http://www.kanunu8.com/book3/6879/'
# The index page is GB2312-encoded, so decode explicitly (not UTF-8).
html = requests.get('http://www.kanunu8.com/book3/6879').content.decode('GB2312')
def get_toc(html, start_url):
    """Extract the chapter URLs from a book's table-of-contents page.

    Args:
        html: decoded HTML of the book's index page.
        start_url: base URL of the book, with or without a trailing '/'.

    Returns:
        A list of absolute chapter URLs.
    """
    # The chapter links sit between the literal text '正文' and </tbody>.
    toc_block = re.findall('正文(.*?)</tbody>', html, re.S)[0]
    toc_url = re.findall('href="(.*?)">', toc_block, re.S)
    # rstrip('/') avoids the double slash the original produced when
    # start_url (as defined above) already ended with '/'.
    base = start_url.rstrip('/')
    return [base + '/' + url for url in toc_url]
def get_article(httl):
    """Pull the chapter title and body text out of one chapter page.

    Returns a (chapter_name, text) tuple; <br /> tags are stripped
    from the body so the text reads as plain paragraphs.
    """
    name_match = re.search('size="4">(.*?)</font>', httl, re.S)
    body_match = re.search('<p>(.*?)</p>', httl, re.S)
    chapter_name = name_match.group(1)
    text = body_match.group(1).replace('<br />', '')
    return chapter_name, text
def save(chapter, article):
    """Append *article* to 动物农场/<chapter>.txt, creating the folder on demand."""
    folder = '动物农场'
    os.makedirs(folder, exist_ok=True)
    target = os.path.join(folder, chapter + '.txt')
    # Append mode: repeated runs add to (rather than replace) the file.
    with open(target, 'a', encoding='utf-8') as f:
        f.write(article)
# Drive the crawl: resolve every chapter URL, fetch it, parse it, save it.
httls = get_toc(html, start_url)
for chapter_url in httls:
    # Chapter pages are GB2312-encoded, like the index page.
    page = requests.get(chapter_url).content.decode('GB2312')
    chapter, article = get_article(page)
    save(chapter, article)
效果图
效果看法
效果一般,还需改良:把章节内容写入文件的格式需要优化;注意该网站页面的编码格式不是UTF-8,而是GB2312,解码时必须显式指定。
|