from lxml import etree
from fake_useragent import UserAgent
import time
import json
import requests
import csv
import random
import codecs
import os
from multiprocessing.dummy import Pool
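
# Scrape the department directory of Beijing Children's Hospital (bch.com.cn):
# collect each department's doctor list (name, profile URL, photo URL) and
# download the doctor photos with a small thread pool.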
headers = {
    'User-Agent': UserAgent().random
}
# Fetch the department overview page and parse it with lxml.
url = 'https://www.bch.com.cn/Html/Hospitals/Departments/Overview0.html'
response = requests.get(url=url, headers=headers)
response.encoding = 'utf-8'
page_text = response.text
tree = etree.HTML(page_text)
li = []  # one {"科室": name, "URL": link} dict per department to scrape
# for i in range(1, 49):
#     di = {}
#     name = tree.xpath('/html/body/div/div[7]/div/div/ul[1]/li[{}]//text()'.format(i))[0]
#     url1 = tree.xpath('/html/body/div/div[7]/div/div/ul[1]/li[{}]//@href'.format(i))[0]
#     url1 = "https://www.bch.com.cn" + url1
#     di["科室"] = name
#     di["URL"] = url1
#     li.append(di)
# Departments come from the page's second <ul>; skip the Medical Ethics Committee entry.
for i in range(1, 3):  # 9
    di = {}
    name = tree.xpath('/html/body/div/div[7]/div/div/ul[2]/li[{}]//text()'.format(i))[0]
    url1 = tree.xpath('/html/body/div/div[7]/div/div/ul[2]/li[{}]//@href'.format(i))[0]
    url1 = "https://www.bch.com.cn" + url1
    if name != "医学伦理委员会":
        di["科室"] = name
        di["URL"] = url1
        li.append(di)
# for i in range(1, 3):
#     di = {}
#     name = tree.xpath('/html/body/div/div[7]/div/div/ul[4]/li[{}]//text()'.format(i))[0]
#     url1 = tree.xpath('/html/body/div/div[7]/div/div/ul[4]/li[{}]//@href'.format(i))[0]
#     url1 = "https://www.bch.com.cn" + url1
#     if name != "临床流行病学与循证医学中心":
#         di["科室"] = name
#         di["URL"] = url1
#         li.append(di)
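# The commented blocks above cover the other department lists on the same page
# (ul[1] and ul[4]); the active loop above only scrapes ul[2].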
information = []  # one record per doctor: department, name, profile URL, photo URL
for l in li:
    url2 = l["URL"]
    response1 = requests.get(url=url2, headers=headers)
    time.sleep(round(random.uniform(0, 2), 2))  # random pause between requests
    response1.encoding = 'utf-8'
    page_text1 = response1.text
    tree1 = etree.HTML(page_text1)
    # The department page links to its doctor-list page at this fixed XPath.
    new_url1 = "https://www.bch.com.cn" + tree1.xpath('/html/body/div/div[6]/div[1]/div[2]/div[3]/h2/a/@href')[0]
    print(new_url1)
    response2 = requests.get(url=new_url1, headers=headers)
    time.sleep(round(random.uniform(0, 2), 2))
    response2.encoding = 'utf-8'
    page_text2 = response2.text
    tree2 = etree.HTML(page_text2)
    # Each <li> on the doctor-list page holds one doctor's photo, profile link and name.
    li_list = tree2.xpath('/html/body/div/div[6]/div[2]/div/ul/li')
    for item in li_list:  # "item" instead of "list" to avoid shadowing the built-in
        infor = {}
        pict_url = "https://www.bch.com.cn" + item.xpath('./a/img/@src')[0]
        doctor_url = "https://www.bch.com.cn" + item.xpath('./a/@href')[0]
        doctor_name = item.xpath('./div/a/text()')[0]
        infor["科室"] = l["科室"]
        infor["名字"] = doctor_name
        infor["医生链接"] = doctor_url
        infor["医生图片链接"] = pict_url
        information.append(infor)
def get_picture(dic):
    """Download one doctor's photo into ./pictures/<name>.png."""
    url = dic["医生图片链接"]
    pic = requests.get(url=url, headers=headers).content
    # Create the output folder if it does not exist yet (safe across threads).
    os.makedirs('./pictures', exist_ok=True)
    img_path = 'pictures/' + dic['名字'] + '.png'
    with open(img_path, 'wb') as fp:
        fp.write(pic)
    print('Download succeeded!')
pool = Pool(4)  # thread pool from multiprocessing.dummy
pool.map(get_picture, information)
pool.close()
pool.join()
# with codecs.open("医技科室和平台中心.json", 'a', 'utf-8') as fp:
#     json.dump(information, fp, ensure_ascii=False, indent=4, separators=(",", ":"))
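
# Optional, hedged sketch (not in the original run): the same records could also be
# written to CSV with the csv module imported above; the file name "doctors.csv" is
# an assumption.
# with open("doctors.csv", 'w', encoding='utf-8', newline='') as fp:
#     writer = csv.DictWriter(fp, fieldnames=["科室", "名字", "医生链接", "医生图片链接"])
#     writer.writeheader()
#     writer.writerows(information)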