Big companies' big websites really do have brutal anti-scraping measures, sob.

This post is an XPath practice project; debugging it took me 3 hours. There are still plenty of techniques I don't know. For example, is there a way to grab all the tags under a given tag directly, so I wouldn't need nested `try...except` blocks just to stop when an error is hit? (A sketch right below addresses this.) And can the HTML being searched be narrowed down by tag first to cut the search time? The code runs a bit slowly. (A refactor sketch after the full script addresses that one.)
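On the first question: XPath can do this directly. Leaving the tag name without an index (`span` instead of `span[1]`, `span[2]`, ...) matches every such tag at once, and `xpath()` returns them all as a Python list, so a `join` or a plain `for` loop replaces the counter-plus-`try...except` pattern. A minimal sketch, reusing the path of the first listing card from the script below (I have not re-verified the page structure):

```python
from lxml import etree

h5 = etree.parse('./58.html', etree.HTMLParser())
# 'span' with no index matches ALL the <span> tags under p[1] at once,
# and xpath() returns their text nodes as a list -- no counter or
# try...except needed to find where they stop.
spans = h5.xpath('//*[@id="__layout"]/div/section/section[3]/section[1]'
                 '/section[2]/div[1]/a/div[2]/div[1]/section/div[1]/p[1]/span/text()')
house_shape = ''.join(s.encode('ISO-8859-1').decode('utf-8') for s in spans)
```

Here is the full practice script: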
import pprint
import os
import time

import requests
from lxml import etree
"""
这篇文章是xpath的练习项目调了我3个小时,
有很多技巧还是不会,比如是不是有直接获取这个标签下的所有标签,就不需要用嵌套`try...except`来碰到错误停止了;
是不是能把所要检索的html通过标签单独挑出来来减少检索的时间,导致代码运行有点慢
"""
def get_html(url):
    """Fetch a page with a browser-like User-Agent and return the decoded text."""
    headers = {
        'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36",
        'Connection': 'close'
    }
    r = requests.get(url=url, headers=headers)
    # Let requests guess the real encoding instead of trusting the header.
    r.encoding = r.apparent_encoding
    time.sleep(0.5)  # small delay to be gentle on the site
    return r.text
def text_save(filename, text):
    # 'with' closes the file even if the write fails.
    with open(filename, 'w', encoding='UTF-8') as file:
        file.write(text)
    print("file saved")
url = "https://linyi.58.com/ershoufang/"
if not os.path.getsize('./58.html'):
html = get_html(url)
text_save('./58.html', html)
h5 = etree.parse('./58.html', etree.HTMLParser())
result = etree.tostring(h5)
div_list = []
house = []
xpath_div = '//*[@id="__layout"]/div/section/section[3]/section[1]/section[2]/div[{}]/a/div[2]/'
for i in range(1, 120):
xpath = xpath_div.format(i)
div_list.append(xpath)
try:
    i = 1
    while True:
        xpath = xpath_div.format(i)
        info = {}

        # Every text node comes back mis-decoded as latin-1; re-encoding and
        # decoding as UTF-8 recovers the Chinese characters.
        title_xpath = xpath + 'div[1]/div[1]/h3/text()'
        title = h5.xpath(title_xpath)[0].encode("ISO-8859-1").decode('utf-8') if h5.xpath(title_xpath) else ''
        info['title'] = title.replace(' ', ',')

        # The layout is split across several numbered <span> tags.
        house_shape_xpath = xpath + 'div[1]/section/div[1]/p[1]/span[{}]/text()'
        # This unguarded [0] is what ends the outer loop: when card i does
        # not exist, the result list is empty and IndexError is raised.
        house_shape = h5.xpath(house_shape_xpath.format(1))[0].encode("ISO-8859-1").decode('utf-8')
        try:
            cnt = 2
            while True:
                house_shape += h5.xpath(house_shape_xpath.format(cnt))[0].encode("ISO-8859-1").decode('utf-8')
                cnt += 1
        except IndexError:
            pass  # no more <span> tags
        info['house_shape'] = house_shape

        area_xpath = xpath + 'div[1]/section/div[1]/p[2]/text()'
        area = h5.xpath(area_xpath)[0].encode("ISO-8859-1").decode('utf-8') if h5.xpath(area_xpath) else ''
        # Guard every split()[0]: an empty field would otherwise raise and
        # silently end the whole outer loop early.
        info['area'] = area.split()[0] if area.split() else ''

        direction_xpath = xpath + 'div[1]/section/div[1]/p[3]/text()'
        direction = h5.xpath(direction_xpath)[0].encode("ISO-8859-1").decode('utf-8') if h5.xpath(direction_xpath) else ''
        info['direction'] = direction.split()[0] if direction.split() else ''

        floor_info_xpath = xpath + 'div[1]/section/div[1]/p[4]/text()'
        floor_info = h5.xpath(floor_info_xpath)[0].encode("ISO-8859-1").decode('utf-8') if h5.xpath(floor_info_xpath) else ''
        info['floor_info'] = floor_info.split()[0] if floor_info.split() else ''

        build_time_xpath = xpath + 'div[1]/section/div[1]/p[5]/text()'
        build_time = h5.xpath(build_time_xpath)[0].encode("ISO-8859-1").decode('utf-8') if h5.xpath(build_time_xpath) else ''
        info['build_time'] = build_time.split()[0].replace('建造', '') if build_time.split() else ''

        community_xpath = xpath + 'div[1]/section/div[2]/p[1]/text()'
        community = h5.xpath(community_xpath)[0].encode("ISO-8859-1").decode('utf-8') if h5.xpath(community_xpath) else ''
        info['community'] = community.split()[0] if community.split() else ''

        # The address is also split across numbered <span> tags.
        address_xpath = xpath + 'div[1]/section/div[2]/p[2]/span[{}]/text()'
        address_parts = []
        try:
            cnt = 1
            while True:
                s = h5.xpath(address_xpath.format(cnt))[0].encode("ISO-8859-1").decode('utf-8')
                address_parts.append(s.split()[0])
                cnt += 1
        except IndexError:
            pass  # no more <span> tags
        info['address'] = address_parts

        house.append(info)
        i += 1
except IndexError:
    pass  # card i was missing: every listing has been collected

pprint.pprint(house)
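On the second question: yes, and it is also the standard way to speed this kind of loop up. Instead of re-walking the whole document from the root for every field of every card, select all the listing cards once, then run short relative XPaths (starting with `./`) against each card element. A rough sketch of the refactor, assuming the same page structure as above (the card path is copied from the script; the helper name `fix` is my own):

```python
from lxml import etree

h5 = etree.parse('./58.html', etree.HTMLParser())

# With no index on the card's div, one query returns every card element;
# the loop then runs exactly as many times as there are cards.
cards = h5.xpath('//*[@id="__layout"]/div/section/section[3]'
                 '/section[1]/section[2]/div/a/div[2]')

def fix(s):
    # Same latin-1 -> UTF-8 repair as in the full script.
    return s.encode("ISO-8859-1").decode('utf-8')

house = []
for card in cards:
    # './...' searches only inside this card's subtree, not the whole
    # document, so each lookup is far cheaper than an absolute path.
    titles = card.xpath('./div[1]/div[1]/h3/text()')
    spans = card.xpath('./div[1]/section/div[1]/p[1]/span/text()')
    house.append({
        'title': fix(titles[0]).replace(' ', ',') if titles else '',
        'house_shape': ''.join(fix(s) for s in spans),
    })
```

This also removes the outer `try...except` entirely: the loop ends on its own when the card list runs out.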