Some captcha sites serve image links that are dynamically processed, so taking a screenshot directly is the more reliable approach. The Python screenshot code is as follows:
from PIL import Image
# import pytesseract  # optional: OCR the cropped captcha afterwards
from selenium import webdriver
# text = pytesseract.image_to_string(Image.open(r'C:\Users\windows7\Desktop\3.png'))
# print(text)
driver = webdriver.Chrome(executable_path=r'E:/python/chromedriver_win32/chromedriver.exe')
driver.get('http://img2.woyaogexing.com/2019/02/27/a4ff07bf5c2c47c5a015ac5adac4437f!600x600.jpeg')
driver.implicitly_wait(8)
# capture the full page
# driver.get_screenshot_as_file('baidu.png')  # equivalent alternative
driver.save_screenshot('full_baidu.png')
# crop out just the image element
img = driver.find_element_by_xpath('/html/body/img')
print(img.location)  # e.g. {'x': 684, 'y': 301}
print(img.size)      # e.g. {'height': 36, 'width': 100}
left = img.location['x']
top = img.location['y']
right = img.location['x'] + img.size['width']
bottom = img.location['y'] + img.size['height']
photo = Image.open('full_baidu.png')
photo = photo.crop((left, top, right, bottom))  # box is (left, upper, right, lower)
photo.save('full_baidu.png')  # overwrites the full screenshot with the cropped image
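Note that on high-DPI displays the full-page screenshot may be scaled by the device pixel ratio, in which case the location and size values need to be multiplied by that ratio before cropping. A simpler route, available since Selenium 3.x, is the screenshot() method on WebElement, which captures a single element directly and skips the manual crop. A minimal sketch, assuming the same chromedriver path and page layout as above:

from selenium import webdriver

driver = webdriver.Chrome(executable_path=r'E:/python/chromedriver_win32/chromedriver.exe')
driver.get('http://img2.woyaogexing.com/2019/02/27/a4ff07bf5c2c47c5a015ac5adac4437f!600x600.jpeg')
driver.implicitly_wait(8)
# screenshot() renders just this element straight to a PNG file
img = driver.find_element_by_xpath('/html/body/img')
img.screenshot('captcha.png')
driver.quit()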
The complete Scrapy code is as follows:
# -*- coding: utf-8 -*-
import time
import scrapy
from scrapy import signals
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from PIL import Image


class BasicdemoSpider(scrapy.Spider):
    name = 'basicdemo'
    # allowed_domains = ['www.baidu.com']
    start_urls = ['http://www.jinjianghotels.com/']

    def __init__(self):
        self.driver = webdriver.Chrome(executable_path=r'E:/python/chromedriver_win32/chromedriver.exe')
        self.wait = WebDriverWait(self.driver, 10)
        super().__init__()

    def parse(self, response):
        self.driver.maximize_window()
        self.driver.get('https://hotel.bestwehotel.com/NewLogin/?go=http%3A%2F%2Fwww.jinjianghotels.com%2F')
        time.sleep(8)
        # self.driver.find_element_by_xpath('/html/body/div[1]/div/div[2]/div/div/ul/li[2]').click()
        print(self.driver.page_source)
        # round-trip through GBK with errors ignored so the write succeeds on a GBK Windows console
        with open("fileName.txt", "a+") as f:
            f.write(self.driver.page_source.encode('GBK', 'ignore').decode('GBK'))
        self.driver.find_element_by_xpath('//*[@id="username"]').send_keys('12345678')
        self.driver.find_element_by_xpath('//*[@id="password"]').send_keys('666666')
        time.sleep(6)
        a = self.driver.find_element_by_xpath('//*[@id="imgVcode"]').get_attribute('src')
        print("fetched src attribute: " + a)
        self.driver.get(a)
        time.sleep(6)
        self.driver.save_screenshot('full_baidu.png')
        img = self.driver.find_element_by_xpath('/html/body/img')
        print(img.location)  # e.g. {'x': 684, 'y': 301}
        print(img.size)      # e.g. {'height': 36, 'width': 100}
        left = img.location['x']
        top = img.location['y']
        right = img.location['x'] + img.size['width']
        bottom = img.location['y'] + img.size['height']
        photo = Image.open('full_baidu.png')
        photo = photo.crop((left, top, right, bottom))
        photo.save('full_baidu.png')
        # a = self.driver.find_element_by_xpath('/html/body/div[1]/div/div[2]/div/div/div[1]/form/div[1]/div[3]/img')
        # print(a.get_attribute('src'))
    # def spider_closed(self, spider):
    #     # quit the browser when the spider exits
    #     print('spider closed')
    #     self.driver.quit()

    # The code below is the official way to close the browser automatically via signals
    # @classmethod
    # def from_crawler(cls, crawler, *args, **kwargs):
    #     spider = super(BasicdemoSpider, cls).from_crawler(crawler, *args, **kwargs)
    #     crawler.signals.connect(spider.closeSpider, signals.spider_closed)
    #     return spider
    #
    # def closeSpider(self):
    #     self.driver.quit()
    #     print("closeSpider ran")