Take crawling CSDN as an example:
Code:
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
import pandas as pd

# Save the scraped titles as a CSV file
def save_csv(arr, csv_filename=None):
    """Save the data in csv format"""
    if csv_filename is None:
        csv_filename = "csv.csv"
    arr_df = pd.DataFrame({'title': arr})
    arr_df.to_csv(csv_filename, float_format='%.3f', index=True, header=True)

# 1. Get a browser object
# If chromedriver is not on the PATH, pass its location explicitly:
# browser = webdriver.Chrome(executable_path='D:\\pythonProject\\venv\\Scripts\\chromedriver.exe')
browser = webdriver.Chrome()  # here the driver was copied into the Scripts directory

# 2. Request the URL
url = "https://www.csdn.net/"

# 3. Maximize the window
browser.maximize_window()
browser.get(url)

# send_keys: type text into the input box
browser.find_element(By.CSS_SELECTOR, "#toolbar-search-input").send_keys("爬虫真好玩")
# click: fire a click event
browser.find_element(By.CSS_SELECTOR, "#toolbar-search-button").click()
# clear: empty the input box
browser.find_element(By.CSS_SELECTOR, "#toolbar-search-input").clear()

# The search results open in a new tab; switch to it
browser.switch_to.window(browser.window_handles[1])
time.sleep(5)

# Scroll to the bottom several times so lazily loaded results appear
for i in range(5):
    js = "document.documentElement.scrollTop=10000"
    browser.execute_script(js)
    time.sleep(2)

li_list = browser.find_elements(By.XPATH, '//div[@class="so-items-normal"]/div[1]')
# print(li_list[0].text)
arr = []
for item in li_list:
    arr.append(item.text)
    print(item.text)
save_csv(arr)
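The fixed time.sleep pauses work, but they waste time on fast connections and can be too short on slow ones. As a minimal sketch, the same wait can be expressed with Selenium's explicit waits (reusing the result-card XPath from the script above):

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Wait up to 10 seconds for at least one result card to be present,
# instead of sleeping for a fixed 5 seconds
wait = WebDriverWait(browser, 10)
li_list = wait.until(
    EC.presence_of_all_elements_located(
        (By.XPATH, '//div[@class="so-items-normal"]/div[1]')
    )
)

WebDriverWait polls the condition until it holds and raises TimeoutException when the timeout expires, so the script continues as soon as the results actually exist.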
Result: (screenshot not recoverable) each matched title is printed to the console and the full list is written to csv.csv.
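To sanity-check the output, the saved file can be read back with pandas (assuming the default csv.csv filename that save_csv falls back to):

import pandas as pd

# The first CSV column is the index written by to_csv(index=True)
df = pd.read_csv("csv.csv", index_col=0)
print(df.head())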