The overall procedure and the theory of hand-eye calibration follow the Bilibili video 手眼标定-原理与实战 (hand-eye calibration: principle and practice), which explains it in great detail. What is implemented here is the eye-in-hand type of hand-eye calibration.
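In brief, eye-in-hand means the camera is mounted on the robot's end effector while the calibration board stays fixed relative to the robot base, so for every robot pose the chained transform board→camera→end→base must come out the same. cv2.calibrateHandEye takes the end-to-base poses (from the robot controller) and the board-to-camera poses (from PnP on the chessboard images) and solves the resulting AX = XB problem for the unknown camera-to-end transform. The script below collects both sets of poses, runs the solver, and then checks that the chained transform is indeed consistent across images.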
import cv2
import numpy as np
import glob
from math import *
import pandas as pd
import os
# Camera intrinsic matrix from a prior intrinsic calibration; the values are specific to the camera used here
K=np.array([[4283.95148301687,-0.687179973528103,2070.79900757240],
            [0,4283.71915784510,1514.87274457919],
            [0,0,1]],dtype=np.float64)
chess_board_x_num=11   # number of inner corners along the board's x direction
chess_board_y_num=8    # number of inner corners along the board's y direction
chess_board_len=10     # side length of one chessboard square, in mm
def myRPY2R_robot(x, y, z):
    # Build a rotation matrix from roll-pitch-yaw angles (radians), composed as R = Rz @ Ry @ Rx
    Rx = np.array([[1, 0, 0], [0, cos(x), -sin(x)], [0, sin(x), cos(x)]])
    Ry = np.array([[cos(y), 0, sin(y)], [0, 1, 0], [-sin(y), 0, cos(y)]])
    Rz = np.array([[cos(z), -sin(z), 0], [sin(z), cos(z), 0], [0, 0, 1]])
    R = Rz@Ry@Rx
    return R
def pose_robot(x, y, z, Tx, Ty, Tz):
    # Convert a robot pose (Euler angles in degrees plus a translation) into a 4x4 end-to-base transform
    thetaX = x / 180 * pi
    thetaY = y / 180 * pi
    thetaZ = z / 180 * pi
    R = myRPY2R_robot(thetaX, thetaY, thetaZ)
    t = np.array([[Tx], [Ty], [Tz]])
    RT1 = np.column_stack([R, t])
    RT1 = np.row_stack((RT1, np.array([0, 0, 0, 1])))
    return RT1
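# Added sanity check (not part of the original flow): a 90-degree rotation about z should map
# the x-axis onto the y-axis, so the printed result should be approximately [0, 1, 0, 1].
print(pose_robot(0, 0, 90, 0, 0, 0) @ np.array([1, 0, 0, 1]))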
def get_RT_from_chessboard(img_path, chess_board_x_num, chess_board_y_num, K, chess_board_len):
    '''
    :param img_path: path of the image to read
    :param chess_board_x_num: number of inner corners along the board's x direction
    :param chess_board_y_num: number of inner corners along the board's y direction
    :param K: camera intrinsic matrix
    :param chess_board_len: side length of one chessboard square, in mm
    :return: 4x4 board-to-camera transform (camera extrinsics for this image)
    '''
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    size = gray.shape[::-1]
    # Note: ret is not checked here; if detection fails, corners is None and the code below raises
    ret, corners = cv2.findChessboardCorners(gray, (chess_board_x_num, chess_board_y_num), None)
    # Reshape the detected corners into a 2xN array of pixel coordinates
    corner_points = np.zeros((2, corners.shape[0]), dtype=np.float64)
    for i in range(corners.shape[0]):
        corner_points[:, i] = corners[i, 0, :]
    # Corresponding 3D points on the board plane (z = 0), in mm
    object_points = np.zeros((3, chess_board_x_num * chess_board_y_num), dtype=np.float64)
    flag = 0
    for i in range(chess_board_y_num):
        for j in range(chess_board_x_num):
            object_points[:2, flag] = np.array([(chess_board_x_num - j - 1) * chess_board_len,
                                                (chess_board_y_num - i - 1) * chess_board_len])
            flag += 1
    # Solve PnP; lens distortion is ignored here (distCoeffs=None)
    retval, rvec, tvec = cv2.solvePnP(object_points.T, corner_points.T, K, distCoeffs=None)
    RT = np.column_stack(((cv2.Rodrigues(rvec))[0], tvec))
    RT = np.row_stack((RT, np.array([0, 0, 0, 1])))
    return RT
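# Optional refinement (an assumption, not in the original code): sub-pixel corner refinement
# usually improves the PnP extrinsics. It would go right after findChessboardCorners inside
# get_RT_from_chessboard, for example:
#     criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
#     corners = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)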
folder = r"calib"   # folder containing the calibration images (1.bmp, 2.bmp, ...)
'''
A quirk of this data set: the chessboard corners are detected in some images but not in others.
One way to tell which images work is how long get_RT_from_chessboard takes to run; the usable
image indices are listed in good_picture below (a more direct check is sketched after this comment).
'''
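# A more direct screen (added here as a sketch, not in the original): run findChessboardCorners
# on every .bmp in the folder and keep only the images where detection succeeds.
detected = []
for p in sorted(glob.glob(folder + '/*.bmp')):
    g = cv2.cvtColor(cv2.imread(p), cv2.COLOR_BGR2GRAY)
    ok, _ = cv2.findChessboardCorners(g, (chess_board_x_num, chess_board_y_num), None)
    if ok:
        detected.append(p)
print('images with a detected chessboard:', detected)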
good_picture=[1,3,5,6,7,8,10,11,12,13,14,15]
file_num=len(good_picture)
R_all_chess_to_cam_1 = []   # board-to-camera rotations, one per usable image
T_all_chess_to_cam_1 = []   # board-to-camera translations, one per usable image
for i in good_picture:
    image_path = folder + '/' + str(i) + '.bmp'
    RT = get_RT_from_chessboard(image_path, chess_board_x_num, chess_board_y_num, K, chess_board_len)
    R_all_chess_to_cam_1.append(RT[:3, :3])
    T_all_chess_to_cam_1.append(RT[:3, 3].reshape((3, 1)))
# Robot end-effector poses in the base frame, recorded at each image; row i-1 of the sheet
# corresponds to image i (ax/ay/az are Euler angles in degrees, dx/dy/dz the translation)
file_address = 'calib/机器人基坐标位姿.xlsx'
sheet_1 = pd.read_excel(file_address)
R_all_end_to_base_1 = []
T_all_end_to_base_1 = []
for i in good_picture:
    RT = pose_robot(sheet_1.iloc[i-1]['ax'], sheet_1.iloc[i-1]['ay'], sheet_1.iloc[i-1]['az'],
                    sheet_1.iloc[i-1]['dx'], sheet_1.iloc[i-1]['dy'], sheet_1.iloc[i-1]['dz'])
    R_all_end_to_base_1.append(RT[:3, :3])
    T_all_end_to_base_1.append(RT[:3, 3].reshape((3, 1)))
# calibrateHandEye takes the end(gripper)-to-base poses and the board(target)-to-camera poses,
# and returns the camera-to-end(gripper) rotation and translation (default method: Tsai)
R, T = cv2.calibrateHandEye(R_all_end_to_base_1, T_all_end_to_base_1, R_all_chess_to_cam_1, T_all_chess_to_cam_1)
RT = np.column_stack((R, T))
RT = np.row_stack((RT, np.array([0, 0, 0, 1])))
print('Camera-to-end-effector transform:')
print(RT)
# Consistency check: the board is fixed relative to the robot base, so the chained transform
# below should be (nearly) identical for every image if the calibration is good
for i in range(len(good_picture)):
    RT_end_to_base = np.column_stack((R_all_end_to_base_1[i], T_all_end_to_base_1[i]))
    RT_end_to_base = np.row_stack((RT_end_to_base, np.array([0, 0, 0, 1])))
    RT_chess_to_cam = np.column_stack((R_all_chess_to_cam_1[i], T_all_chess_to_cam_1[i]))
    RT_chess_to_cam = np.row_stack((RT_chess_to_cam, np.array([0, 0, 0, 1])))
    RT_cam_to_end = np.column_stack((R, T))
    RT_cam_to_end = np.row_stack((RT_cam_to_end, np.array([0, 0, 0, 1])))
    RT_chess_to_base = RT_end_to_base @ RT_cam_to_end @ RT_chess_to_cam
    RT_chess_to_base = np.linalg.inv(RT_chess_to_base)   # note: after this inversion it is actually the base-to-chessboard transform
    print('Iteration', i)
    print(RT_chess_to_base[:3, :])
    print('')
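# Example use of the result (an added sketch): express a point measured in the camera frame at
# the last usable pose in the robot base frame, by chaining camera-to-end and end-to-base.
p_cam = np.array([[0.0], [0.0], [100.0], [1.0]])   # homogeneous point 100 mm in front of the camera
T_end_to_base = np.row_stack((np.column_stack((R_all_end_to_base_1[-1], T_all_end_to_base_1[-1])),
                              np.array([0, 0, 0, 1])))
p_base = T_end_to_base @ RT @ p_cam                # RT is the 4x4 camera-to-end transform from above
print('point in base frame:', p_base[:3].ravel())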
The opencv-python reference documentation has relatively little material on this topic, and it took quite a bit of trial and error to figure out the input requirements of the functions; in the end it comes down to understanding the eye-in-hand principle.