[Code for my own use] sift_ran.py
import numpy as np
import cv2
from matplotlib import pyplot as plt
MIN_MATCH_COUNT = 4
img1p= cv2.imread('train5.jpeg')
img2p = cv2.imread('5train5.jpeg')
img1= cv2.cvtColor(img1p, cv2.COLOR_BGR2GRAY)
img2 = cv2.cvtColor(img2p, cv2.COLOR_BGR2GRAY)
sift = cv2.xfeatures2d.SIFT_create()  # on newer OpenCV builds (4.4+), use cv2.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
good = []
for m, n in matches:
    if m.distance < 0.7 * n.distance:
        good.append(m)
if len(good) > MIN_MATCH_COUNT:
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    matchesMask = mask.ravel().tolist()
    h, w = img1.shape
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(pts, M)
    img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
    imgOut = cv2.warpPerspective(img2p, M, (img1.shape[1], img1.shape[0]),
                                 flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)
    plt.subplot(121)
    cv2.imwrite("imgOut.jpg", imgOut)
    plt.imshow(imgOut)
    plt.title('imgOut')
else:
    print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
    matchesMask = None
draw_params = dict(matchColor=(0, 255, 0),
                   singlePointColor=None,
                   matchesMask=matchesMask,
                   flags=2)
img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params)
plt.subplot(122)
plt.imshow(img3, 'gray')
plt.title('target<------>original picture')
plt.show()
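The script above only gates on the raw count of ratio-test matches (`MIN_MATCH_COUNT`). A minimal sketch of an extra sanity check, assuming the same `mask` and `good` variables produced above: count the RANSAC inliers and reject the homography when too few good matches survive. The helper name and the 0.5 threshold are illustrative and not part of the original script.

```python
import numpy as np

def homography_is_reliable(mask, good, min_inliers=4, min_inlier_ratio=0.5):
    """Return True when enough ratio-test matches survive RANSAC.

    `mask` is the Nx1 inlier mask returned by cv2.findHomography and `good`
    is the list of ratio-test matches; both thresholds are assumptions.
    """
    inliers = int(np.asarray(mask).sum())
    ratio = inliers / max(len(good), 1)
    return inliers >= min_inliers and ratio >= min_inlier_ratio

# e.g. only warp when homography_is_reliable(mask, good) is True
```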
0new_mian.py
import os
from PIL import Image
import cv2
import cv2 as cv
import numpy as np
from nsingle_numeracy_2 import zhizhen
from matplotlib import pyplot as plt
'''#1 Image preprocessing'''
'''#4 Single-dial reading: cut away the dial disc, then locate the pointer-tip corner'''
def dushu(x1, y1, x2, y2):
    print("Start reading!")
    if x1 == x2:
        n = 0
        result = 90
    elif y1 == y2 and x1 < x2:
        n = 2
        result = 0
    elif y1 == y2 and x1 > x2:
        n = 7
        result = 180
    else:
        k = -(y2 - y1) / (x2 - x1)        # negate because the image y axis points down
        result = np.arctan(k) * 57.29577  # radians -> degrees
        if x1 > x2 and y1 > y2:
            result += 180
        elif x1 > x2 and y1 < y2:
            result += 180
        elif x1 < x2 and y1 < y2:
            result += 360
        if 54 < result <= 90:
            n = 0
        elif 18 < result <= 54:
            n = 1
        elif 0 <= result <= 18 or 342 < result <= 360:
            n = 2
        elif 306 < result <= 342:
            n = 3
        elif 270 < result <= 306:
            n = 4
        elif 234 < result <= 270:
            n = 5
        elif 198 < result <= 234:
            n = 6
        elif 162 < result <= 198:
            n = 7
        elif 126 < result <= 162:
            n = 8
        elif 90 < result <= 126:
            n = 9
        else:
            print("Reading error!")
    print("Pointer angle: " + str(result) + " degrees, digit:", n)
    return int(n), result
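# Worked example with hypothetical coordinates: for a dial centre at (100, 100)
# and a pointer tip at (150, 50), k = -(50 - 100) / (150 - 100) = 1, so the
# angle is roughly 45 degrees; 18 < 45 <= 54, so dushu(100, 100, 150, 50)
# returns (1, 45.0) (approximately). Each digit occupies a 36-degree sector and
# the function returns the digit the pointer has most recently passed: anything
# from straight up (90 degrees) down to just above 54 degrees reads as 0.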
'''#3 Extracting the pointer from a single dial'''
def tiqu(src, a, b):
    # Red pointer: threshold in HSV; red wraps around hue 0/180 in OpenCV,
    # so two hue ranges are combined.
    img = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)
    cv2.imshow('HSV_img', img)
    cv2.waitKey(0)
    low_hsv = np.array([150, 43, 46])
    high_hsv = np.array([180, 255, 255])
    mask_Red1 = cv2.inRange(img, lowerb=low_hsv, upperb=high_hsv)
    low_hsv = np.array([0, 43, 46])
    high_hsv = np.array([10, 255, 255])
    mask_Red2 = cv2.inRange(img, lowerb=low_hsv, upperb=high_hsv)
    maskRed = mask_Red1 + mask_Red2
    cv2.imshow('Redmask1', mask_Red1)
    cv2.waitKey(0)
    mask = cv2.erode(maskRed, None, iterations=3)
    cv2.imshow("Redmask2", mask)
    cv2.waitKey(0)
    # Grow a filled circle around the dial centre (a, b) and subtract it from
    # the mask until only a few pixels -- the pointer tip -- are left.
    circle = np.zeros(mask.shape[0:2], dtype="uint8")
    j = 1
    while len(mask[mask == 255]) > 5:
        maskcircle = cv2.circle(circle, (a, b), j, 255, -1)
        maskcircle = cv2.add(mask, np.zeros(np.shape(mask), dtype=np.uint8), mask=maskcircle)
        mask = mask - maskcircle
        j = j + 1
    cv2.imshow("point", mask)
    cv2.waitKey(0)
    cons = []
    con = cv2.goodFeaturesToTrack(mask, 1, 0.9, 10)
    if con is not None and len(con) > 0:
        for x, y in np.float32(con).reshape(-1, 2):
            cons.append((x, y))
            cons_img = cv2.circle(src, (int(x), int(y)), 1, (0, 0, 255))
        print(cons)
        cv2.imshow('cons_img ', cons_img)
        cv2.waitKey(0)
    n, ang = dushu(a, b, cons[0][0], cons[0][1])
    return n, ang
def tiqublack(img, a, b, r):
    # Black pointer: keep low-value pixels. Note that img is used as passed in
    # (no BGR->HSV conversion here, unlike tiqu()).
    low_hsv = np.array([0, 0, 0])
    high_hsv = np.array([180, 255, 46])
    mask = cv2.inRange(img, lowerb=low_hsv, upperb=high_hsv)
    cv2.imshow('Blackmask1', mask)
    cv2.waitKey(0)
    mask = cv2.erode(mask, None, iterations=4)
    mask = cv2.dilate(mask, None, iterations=1)
    cv2.imshow("Blackmask2", mask)
    # Same tip-isolation loop as in tiqu().
    circle = np.zeros(mask.shape[0:2], dtype="uint8")
    j = 1
    while len(mask[mask == 255]) > 5:
        maskcircle = cv2.circle(circle, (a, b), j, 255, -1)
        maskcircle = cv2.add(mask, np.zeros(np.shape(mask), dtype=np.uint8), mask=maskcircle)
        mask = mask - maskcircle
        j = j + 1
    cv2.imshow("point", mask)
    cv2.waitKey(0)
    cons = []
    con = cv2.goodFeaturesToTrack(mask, 1, 0.001, 1)
    if con is not None and len(con) > 0:
        for x, y in np.float32(con).reshape(-1, 2):
            cons.append((x, y))
            cons_img = cv2.circle(img, (int(x), int(y)), 1, (0, 0, 255))
        print(cons)
        cv2.imshow('cons_img ', cons_img)
        linepic = cv2.line(img, (int(cons[0][0]), int(cons[0][1])), (int(a), int(b)), (0, 0, 255))
        cv2.imshow('linepic', linepic)
        cv2.waitKey(0)
    n, ang = dushu(a, b, cons[0][0], cons[0][1])
    return n, ang
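# The loop above works because the disc drawn around the dial centre grows by one
# pixel of radius per pass and every pixel it covers is removed from the pointer
# mask, so only the pixels farthest from the centre -- the tip -- survive.
# A self-contained sketch of the same idea on synthetic data (this helper is
# illustrative only and is not called anywhere in this file):
def _tip_isolation_demo():
    demo = np.zeros((200, 200), dtype="uint8")
    cv2.line(demo, (100, 100), (160, 40), 255, 3)    # synthetic pointer from the "centre"
    j = 1
    while np.count_nonzero(demo) > 5:
        cv2.circle(demo, (100, 100), j, 0, -1)       # erase a growing disc around the base
        j += 1
    tip = cv2.goodFeaturesToTrack(demo, 1, 0.9, 10)  # corner of the few remaining tip pixels
    print("isolated tip:", None if tip is None else tip.reshape(-1, 2))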
def dividing(img):
    src = img
    ROI = np.zeros(src.shape, np.uint8)
    gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
    cv2.imshow("gray", gray)
    cv2.waitKey(0)
    binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 19, 10)
    cv2.imshow("ADAPTIVE_THRESH_MEAN_C", binary)
    binary = cv2.medianBlur(binary, 5)
    cv2.imshow("medianBlur", binary)
    cv2.waitKey(0)
    '''#2 Segmenting the individual dials'''
    # param2=1 makes HoughCircles very permissive; only the first 8 hits are kept.
    circles = cv2.HoughCircles(binary, cv2.HOUGH_GRADIENT, 1, 50, param1=100, param2=1, minRadius=20, maxRadius=40)
    circles = np.uint16(np.around(circles))
    n = 0
    x = [[] for i in range(8)]
    for i in circles[0, :]:
        if n < 8:
            x[n].append(i[0])
            x[n].append(i[1])
            x[n].append(i[2])
            n = n + 1
        else:
            break
    cv2.imshow('circle', src)
    cv2.waitKey(0)
    print(x)
    x = np.array(x)
    x = x[x[:, 0].argsort()]   # order the dials left to right by centre x
    print(x)
    n = 0
    ang = [0, 0, 0, 0, 0, 0, 0, 0]
    k = [0, 0, 0, 0, 0, 0, 0, 0]
    for i in range(len(x)):
        # Cut out one dial at a time: keep the disc, paint everything else white.
        circle = np.zeros(ROI.shape[0:2], dtype="uint8")
        maskcircle = cv2.circle(circle, (x[i][0], x[i][1]), x[i][2] - 2, 255, -1)
        mask = cv2.add(src, np.zeros(np.shape(src), dtype=np.uint8), mask=maskcircle)
        bg = np.ones_like(img, np.uint8) * 255
        cv2.bitwise_not(bg, bg, mask=maskcircle)
        cv2.imshow('bg.jpg', bg)
        mask = mask + bg
        cv2.imshow('result.jpg', mask)
        cv2.waitKey(0)
        if 0 <= n <= 3:
            # the four leftmost dials have black pointers
            print(x[n])
            ang[n], k[n] = tiqublack(mask, x[n][0], x[n][1], x[n][2])
        elif 3 < n < 8:
            # the four rightmost dials have red pointers
            ang[n], k[n] = tiqu(mask, x[n][0], x[n][1])
        n = n + 1
    return ang, k
if __name__ == '__main__':
    img = "train5.jpeg"
    img = cv2.imread(img)
    ang, k = dividing(img)
    x = []
    y = []
    x.extend([ang[2], ang[0], ang[1], ang[3], ang[4], ang[6], ang[7], ang[5]])
    y.extend([k[2], k[0], k[1], k[3], k[4], k[6], k[7], k[5]])
    print("Black-pointer dials read:", ang[2], ang[0], ang[1], ang[3])
    print("Red-pointer dials read:", ang[4], ang[6], ang[7], ang[5])
    print(x, y)
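The `x` list built above already orders the eight digits from the 1000s dial down to the 0.0001s dial, which is exactly the weighting used in the GUI script below. A minimal sketch of composing them into a single reading, assuming that ordering (the helper name `compose_reading` is illustrative, not part of the original code):

```python
def compose_reading(digits):
    """Combine eight per-dial digits, ordered from the 1000s dial down to the
    0.0001s dial (the `x` list above), into one numeric reading."""
    assert len(digits) == 8
    return sum(d * 10 ** (3 - i) for i, d in enumerate(digits))

# e.g. compose_reading([1, 2, 3, 4, 5, 6, 7, 8]) -> 1234.5678 (up to float rounding)
```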
99kishihua.py
import cv2
from lunwen_000 import dividing
from tkinter import *
from tkinter.filedialog import askopenfilename

frameT = Tk()
frameT.geometry('500x200+400+200')
frameT.title('Select the file to process')
frame = Frame(frameT)
frame.pack(padx=10, pady=10)
frame_1 = Frame(frameT)
frame_1.pack(padx=10, pady=10)
frame1 = Frame(frameT)
frame1.pack(padx=10, pady=10)
v1 = StringVar()
v2 = StringVar()
ent = Entry(frame, width=50, textvariable=v1).pack(fill=X, side=LEFT)
ent = Entry(frame_1, width=50, textvariable=v2).pack(fill=X, side=LEFT)
global i, num

def fileopen():
    # Let the user pick an image and remember its path.
    file_sql = askopenfilename()
    print(file_sql)
    global i
    i = file_sql
    if file_sql:
        v1.set(file_sql)

def number():
    # Run the dial-reading pipeline on the selected image and compose the value.
    global i, num
    img = cv2.imread(i)
    ang = dividing(img)
    print("Black-pointer dials read:", ang[2], ang[0], ang[1], ang[3])
    print("Red-pointer dials read:", ang[4], ang[6], ang[7], ang[5])
    num = (1000 * ang[2] + 100 * ang[0] + 10 * ang[1] + 1 * ang[3]
           + 0.1 * ang[4] + 0.01 * ang[6] + 0.001 * ang[7] + 0.0001 * ang[5])
    if num:
        v2.set(num)
    cv2.destroyAllWindows()

btn = Button(frame, width=20, text='Select file', font=("宋体", 14), command=fileopen).pack(fill=X, padx=10)
ext = Button(frame1, width=10, text='Quit', font=("宋体", 14), command=frameT.quit).pack(fill=X, side=LEFT)
etb = Button(frame_1, width=10, text='Read', font=("宋体", 14), command=number).pack(fill=X, padx=10)
frameT.mainloop()
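Usage, as the three buttons are wired above: 'Select file' opens a file dialog and puts the chosen image path into the first entry box, 'Read' runs dividing() from lunwen_000 on that image and writes the composed reading (1000s digit down to the 0.0001s digit) into the second entry box, and 'Quit' closes the window.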