# -*- coding: utf-8 -*-
# @Time : 2022/1/23 13:26
# @Author : six
# @File : boss.py
# @software : PyCharm
import time
import xlwt
from selenium.webdriver import Chrome
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
def main():
    baseurl = "https://www.xxxx.com/"  # URL of a job-listing site (masked)
    datalist = parse(baseurl)
    savepath = "招聘.xls"
    saveData(datalist, savepath)
def parse(baseurl):
    datalist = []  # list that will hold every scraped row
    web = Chrome()  # create the browser object
    web.get(baseurl)  # open the page as a user would
    time.sleep(3)  # wait for the page to finish loading
    # Find the search box and submit the query
    web.find_element(By.XPATH, '//*[@id="wrap"]/div[3]/div/div[1]/div[1]/form/div[2]/p/input').send_keys("python", Keys.ENTER)
    time.sleep(3)
    # Number of result pages to scrape
    for i in range(1, 9):
        time.sleep(3)
        # Find every <li> job card under //div[@class="job-list"]//ul
        job_list = web.find_elements(By.XPATH, '//div[@class="job-list"]//ul/li')
        if job_list:
            for div in job_list:
                # Relative to the current card, grab job title, location, salary,
                # experience/education requirements, and company name
                job_name = div.find_element(By.XPATH, './div/div[1]/div[1]/div/div[1]/span[1]/a').text
                job_area = div.find_element(By.XPATH, './div/div[1]/div[1]/div/div[1]/span[2]/span').text
                salary = div.find_element(By.XPATH, './div/div[1]/div[1]/div/div[2]/span').text
                ex_ed = div.find_element(By.XPATH, './div/div[1]/div[1]/div/div[2]/p').text
                company_name = div.find_element(By.XPATH, './div/div[1]/div[2]/div/h3/a').text
                # ic(job_name, job_area, salary, ex_ed, company_name)  # debug print to verify the fields
                # Collect one row of data
                data = [job_name, job_area, salary, ex_ed, company_name]
                # print(data)
                # Append the row to datalist
                datalist.append(data)
            # Find the "next page" button
            next_page = web.find_element(By.CSS_SELECTOR, '.next')
            # Once the current page is done, click through to the next one
            next_page.click()
        else:
            break
    web.quit()  # close the browser when scraping is finished
    return datalist
def saveData(datalist, savepath):
    print("数据正在保存请等待...")
    book = xlwt.Workbook(encoding="utf-8", style_compression=0)  # create the workbook
    sheet = book.add_sheet("招聘信息python数据", cell_overwrite_ok=True)  # create the worksheet
    col = ("岗位", "工作地址", "薪资", "经验及学历", "公司名称")  # headers: job, location, salary, experience/education, company
    for i in range(0, 5):
        sheet.write(0, i, col[i])
    # Write however many rows were actually scraped, not a hard-coded 235
    for i in range(0, len(datalist)):
        print("第%d条数据添加完毕" % (i + 1))
        data = datalist[i]
        for j in range(0, 5):
            sheet.write(i + 1, j, data[j])
    book.save(savepath)
if __name__ == '__main__':
    main()
    print("数据爬取完毕!")
I really don't get it. My code doesn't even contain the actual site address, so why does it keep failing review? 1. I already replaced the URL with xxx, and 2. this isn't copied from anyone else.