Disclaimer: this is for learning purposes only. Don't put it to any illegal use; you alone are responsible for what you do with it.
Even though the pandemic keeps flaring up again and again,
plenty of readers have still been messaging me asking whether there is a way to get through their courses automatically,
so here is the tutorial.
But let me say it again up front: don't do anything illegal!
Tools
This tutorial uses Python 3.6. Install the following libraries:
- win32api: pip3 install pywin32
- PIL: pip install Pillow
- pyautogui: pip install pyautogui
- numpy: pip install numpy
- cv2: pip install opencv-python
- matplotlib: pip install matplotlib
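To check that everything installed correctly, here is a quick import-and-version sketch (the __version__ attributes are standard for these packages; win32api does not expose one, so simply importing it is the test):

import win32api            # from pywin32
import PIL
import pyautogui
import numpy
import cv2
import matplotlib

print('Pillow     ', PIL.__version__)
print('pyautogui  ', pyautogui.__version__)
print('numpy      ', numpy.__version__)
print('opencv     ', cv2.__version__)
print('matplotlib ', matplotlib.__version__)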
Use Spy++ to look up the target window's title and class name. The title is unique, so it can be used to find the window.
Getting the window handle
import win32api
import win32con
import win32gui

window_title = '课件学习 - Google Chrome'
screen_width = win32api.GetSystemMetrics(0)   # full screen width in pixels
screen_height = win32api.GetSystemMetrics(1)  # full screen height in pixels

# Look the window up by its (unique) title
hwnd = win32gui.FindWindow(win32con.NULL, window_title)
if hwnd == 0:
    print('%s not found' % window_title)
    exit()
else:
    print('hwnd = %x' % hwnd)

# Window rectangle in screen coordinates, plus its width/height (used later)
window_left, window_top, window_right, window_bottom = win32gui.GetWindowRect(hwnd)
window_width = window_right - window_left
window_height = window_bottom - window_top
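If Spy++ is not handy, the candidate titles can also be listed straight from Python with win32gui.EnumWindows. A small sketch (not part of the original script):

import win32gui

def list_window_titles():
    """Print the handle and title of every visible top-level window."""
    def _collect(hwnd, titles):
        if win32gui.IsWindowVisible(hwnd):
            title = win32gui.GetWindowText(hwnd)
            if title:
                titles.append((hwnd, title))
        return True

    titles = []
    win32gui.EnumWindows(_collect, titles)
    for hwnd, title in titles:
        print('%x  %s' % (hwnd, title))

list_window_titles()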
Main loop
How it works:
take screenshots of the screen,
compare them against template images,
and when the scene we are waiting for shows up,
get its position coordinates,
then automatically call the click routine,
so the whole process runs unattended.
Two OpenCV-based pieces do the work: an image similarity scoring algorithm and an image search (template matching) algorithm.
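The snapshot.grab_screen helper and the actual mouse click are not shown in the listing below, so here is a minimal sketch of what they might look like, assuming PIL's ImageGrab for the screenshot and pyautogui for the click (grab_screen and click_at are placeholder names, not the author's actual module):

# snapshot.py - hypothetical helper, assuming PIL.ImageGrab
from PIL import ImageGrab

def grab_screen(left, top, right, bottom):
    """Grab the screen region (left, top, right, bottom) and return a PIL image."""
    return ImageGrab.grab(bbox=(int(left), int(top), int(right), int(bottom)))

# Clicking a position found by template matching, assuming pyautogui
import pyautogui

def click_at(x, y):
    """Move the mouse to (x, y) in screen coordinates and left-click."""
    pyautogui.click(x=int(x), y=int(y))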
import time
import cv2

# snapshot.grab_screen grabs a screen region (see the sketch above);
# match.lookup_pos and match.score_pic are defined in the sections below;
# find_question, find_daan, find_quding, find_chong_bo and weikaishi are the
# project's own click helpers for the question, answer, confirm and replay buttons.
while True:
    # Grab the whole browser window and save it for debugging
    grab_image = snapshot.grab_screen(window_left, window_top, window_right, window_bottom)
    grab_image.save(r'.\tmp_output\full_screen.png')

    # Reference resolution at which the template coordinates were measured
    full_screen_w = 1936
    full_screen_h = 1056
    pixel_core_x = 877.0
    pixel_core_y = 25.0

    # Scale the reference y coordinate to the current window size and grab a
    # narrow horizontal strip around the browser tab area
    deal_left = window_left
    deal_top = window_top + pixel_core_y / full_screen_h * window_height - 20
    deal_right = window_left + window_width
    deal_bottom = window_top + pixel_core_y / full_screen_h * window_height + 20
    grab_image = snapshot.grab_screen(deal_left, deal_top, deal_right, deal_bottom)

    search_pic = r'.\tmp_output\search_kejianxuexi.png'
    grab_image.save(search_pic)

    # Look for the "课件学习" tab template inside the strip
    template_pic = r'.\template\kejian_tem.png'
    num, w, h, pos_list = match.lookup_pos(template_pic, search_pic)

    left = 0
    top = 0
    find_kejian_flag = 0
    no_voice_flag = 0
    if num == 1:
        left = pos_list[0][0]
        top = pos_list[0][1]
        find_kejian_flag = 1
    else:
        print('==========warning search_kejianxuexi = ' + str(num))
        find_kejian_flag = 0

    if find_kejian_flag:
        # Crop the area just right of the tab title, where the speaker icon sits
        img_rgb = cv2.imread(search_pic)
        img_rgb = img_rgb[top:top + h, left:left + w + 80, :]
        compare_pic = r'.\tmp_output\kejianxuexi_compare.png'
        cv2.imwrite(compare_pic, img_rgb)

        # Score the crop against the "audio playing" / "no audio" templates
        temp_voice = r'.\template\kejianhua_tem_voice.png'
        temp_no_voice = r'.\template\kejianhua_tem_no_voice.png'
        no_voice_flag = match.score_pic(compare_pic, temp_voice, temp_no_voice)

    if no_voice_flag:
        # Audio has stopped: a question may have popped up, or playback ended
        print('===============find no_voice_flag')
        find_question_flag = find_question()
        if find_question_flag:
            time.sleep(5)
            find_daan()    # click an answer
            time.sleep(5)
            find_quding()  # click the confirm button
        find_chongbo_flag = find_chong_bo()
        if find_question_flag and find_chongbo_flag:
            print('========>find_question_flag and find_chongbo_flag')
            exit()
        if find_chongbo_flag:
            weikaishi()    # start playback again
    else:
        print('===============every thing is ok')
    time.sleep(2)
Image similarity scoring algorithm
So how do you tell whether a picture that has been photoshopped is still essentially the same as another picture?
A simple, practical answer is a perceptual hash algorithm (Perceptual Hash Algorithm).
"Perceptual hash" is an umbrella term for a family of algorithms, including aHash, pHash and dHash.
As the name suggests, a perceptual hash is not computed in a strict, exact way but in a relative one, because "similar or not" is itself a relative judgment.
- aHash: average hash. Fast, but often not very accurate.
- pHash: perceptual hash. More accurate, but slower.
- dHash: difference hash. Amazing! Accurate and also very fast, which is why it is the hash of choice for image de-duplication here.

import cv2
import numpy as np

def pHash(imgfile):
"""get image pHash value"""
img=cv2.imread(imgfile, 0)
img=cv2.resize(img,(64,64),interpolation=cv2.INTER_CUBIC)
h, w = img.shape[:2]
vis0 = np.zeros((h,w), np.float32)
vis0[:h,:w] = img
vis1 = cv2.dct(cv2.dct(vis0))
vis1.resize(32,32)
img_list=(vis1.tolist())
print('----------')
sum(img_list)
avg = sum(img_list)/(len(img_list)*1.0)
print('----------')
avg_list = ['0' if i<avg else '1' for i in img_list]
return ''.join(['%x' % int(''.join(avg_list[x:x+4]),2) for x in range(0,32*32,4)])
def hammingDist(s1, s2):
    """Count the positions where two hash strings differ."""
    assert len(s1) == len(s2)
    return sum([ch1 != ch2 for ch1, ch2 in zip(s1, s2)])
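# Usage sketch (placeholder file names, not from the original post): hash two
# screenshots and compare them; a smaller Hamming distance means more similar.
hash_a = pHash(r'.\tmp_output\frame_a.png')
hash_b = pHash(r'.\tmp_output\frame_b.png')
print('hamming distance = %d' % hammingDist(hash_a, hash_b))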
def aHash(img):
    """Average hash: resize to 8x8, threshold each pixel against the mean."""
    img = cv2.resize(img, (8, 8), interpolation=cv2.INTER_CUBIC)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    s = 0
    hash_str = ''
    # Sum all 64 grey values, then take the average
    for i in range(8):
        for j in range(8):
            s = s + gray[i, j]
    avg = s / 64
    # Each pixel contributes one bit: 1 if brighter than the average, else 0
    for i in range(8):
        for j in range(8):
            if gray[i, j] > avg:
                hash_str = hash_str + '1'
            else:
                hash_str = hash_str + '0'
    return hash_str
def cmpHash(hash1, hash2):
    """Return a similarity score in [0, 1]; -1 if the hashes differ in length."""
    n = 0
    if len(hash1) != len(hash2):
        return -1
    for i in range(len(hash1)):
        if hash1[i] != hash2[i]:
            n = n + 1
    return 1 - n / 64
def score_pic(compare_pic, temp_voice, temp_no_voice):
    """Return 1 if the crop looks more like the 'no audio' template, else 0."""
    img1 = cv2.imread(compare_pic)
    img2 = cv2.imread(temp_voice)
    img3 = cv2.imread(temp_no_voice)
    hash1 = aHash(img1)
    hash2 = aHash(img2)
    hash3 = aHash(img3)
    voice_score = cmpHash(hash1, hash2)
    no_voice_score = cmpHash(hash1, hash3)
    # Whichever template scores higher wins
    no_voice_flag = 0
    if no_voice_score >= voice_score:
        no_voice_flag = 1
    else:
        no_voice_flag = 0
    return no_voice_flag
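The explanation above names dHash as the accurate-and-fast option, yet only pHash and aHash appear in the listing. For completeness, here is a minimal dHash sketch in the same style (an illustration, not code from the original project); its 64-bit output can be compared with cmpHash above just like the aHash output:

def dHash(img):
    """Difference hash: resize to 9x8 and compare each pixel with its right neighbour."""
    img = cv2.resize(img, (9, 8), interpolation=cv2.INTER_CUBIC)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    hash_str = ''
    for i in range(8):
        for j in range(8):
            # '1' if this pixel is brighter than the one to its right, else '0'
            if gray[i, j] > gray[i, j + 1]:
                hash_str = hash_str + '1'
            else:
                hash_str = hash_str + '0'
    return hash_str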
Image search algorithm
The search uses res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED).
def lookup_pos(template_pic, search_pic):
    """Find every position where the template scores >= 0.95 in the search image."""
    img_rgb = cv2.imread(search_pic)
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    img = img_gray
    template = cv2.imread(template_pic, 0)
    w, h = template.shape[::-1]
    res = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)
    threshold = 0.95
    loc = np.where(res >= threshold)
    num = 0
    left = 0
    top = 0
    pos_list = []
    # Collect every match and draw a red box around it for debugging
    for pt in zip(*loc[::-1]):
        cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)
        left = pt[0]
        top = pt[1]
        pos_list.append(pt)
        num = num + 1
    # Save the annotated image and the (scaled) score map for inspection
    res = res * 256
    cv2.imwrite(r'.\tmp_output\out.png', img_rgb)
    cv2.imwrite(r'.\tmp_output\res.png', res)
    return num, w, h, pos_list
Honestly, attending class is a delight;
you just don't know it yet...
That's all for today's article.
I'm 小熊猫; see you in the next one!