-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathgrasp_eye-in-hand_demo.py
280 lines (231 loc) · 10.2 KB
/
grasp_eye-in-hand_demo.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
# -*- coding: utf-8 -*-
import pyrealsense2 as rs
import numpy as np
import cv2
import panda_py
from panda_py import libfranka, constants
import time
'''
Eye on hand transformation
'''
"""
PACK:
Average Camera to gripper rotation matrix:
[[ 0.01089461 -0.99938983 0.03318541]
[ 0.99974862 0.01023613 -0.01994814]
[ 0.01959627 0.03339439 0.99925012]]
Average Camera to gripper translation vector:
[[ 0.05780843]
[-0.06741266]
[-0.26024217]]
HORAUD:
Average Camera to gripper rotation matrix:
[[ 0.01085924 -0.99939097 0.03316284]
[ 0.99974926 0.01020169 -0.01993327]
[ 0.01958281 0.03337098 0.99925117]]
Average Camera to gripper translation vector:
[[ 0.05780947]
[-0.06741401]
[-0.26024155]]
DANIILIDIS
Average Camera to gripper rotation matrix:
[[ 0.00891022 -0.99862712 0.05161862]
[ 0.99955216 0.00742 -0.02899003]
[ 0.02856722 0.05185381 0.99824601]]
Average Camera to gripper translation vector:
[[ 0.05125611]
[-0.0717345 ]
[-0.25461249]]
"""
# Hand-eye calibration result: 4x4 homogeneous transform from the camera frame
# to the end-effector (gripper) frame. Values are the DANIILIDIS solution
# listed in the calibration notes above.
end_T_cam = np.array([[0.00891022, -0.99862712, 0.05161862, 0.05125611],
                      [ 0.99955216, 0.00742, -0.02899003, -0.0717345],
                      [ 0.02856722, 0.05185381, 0.99824601, -0.25461249],
                      [0., 0., 0., 1.0]])
"""
手动校准偏移量
"""
# Manually calibrated offsets (metres) added to the computed grasp target in
# the robot base frame; they compensate for the tool-centre-point offset
# between the flange pose and the gripper centre.
x_offset = -0.015
y_offset = -0.05
z_offset = -0.13
'''
相机设置
'''
# --- Camera setup (Intel RealSense) ---
pipeline = rs.pipeline()  # streaming pipeline
config = rs.config()  # stream configuration
config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 15)  # depth stream, 1280x720 @ 15 fps
config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 15)  # color stream, 1280x720 @ 15 fps
pipe_profile = pipeline.start(config)  # start streaming
# Create an align object so depth frames are aligned to the color stream
align_to = rs.stream.color  # stream type the depth frames will be aligned to
align = rs.align(align_to)  # rs.align performs the depth-to-color alignment
'''
机械臂设置
'''
# --- Robot arm setup (Franka Panda via panda_py) ---
hostname = "169.254.37.13" # Your Panda IP or hostname
panda = panda_py.Panda(hostname)
panda.set_default_behavior() # Panda default collision thresholds
# panda.move_to_start()
# Forward kinematics of the standard start joint configuration = home pose.
init_pose = panda_py.fk(constants.JOINT_POSITION_START)
# # print(init_pose)
panda.move_to_pose(init_pose)
# Motion-parameter defaults (note: the main loop below passes its own values
# inline; these module-level copies are not referenced there).
joint_speed_factor = 0.05
cart_speed_factor = 0.05
stiffness=np.array([600., 600., 600., 600., 250., 150., 50.])
damping=np.array([50., 50., 50., 20., 20., 20., 10.])
dq_threshold=0.001
success_threshold=0.01
# Optionally set higher thresholds to execute fast trajectories
panda.get_robot().set_collision_behavior(
    [100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0],
    [100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0],
    [100.0, 100.0, 100.0, 100.0, 100.0, 100.0],
    [100.0, 100.0, 100.0, 100.0, 100.0, 100.0])
gripper = libfranka.Gripper(hostname)
# Drop-off location in the base frame (used only by commented-out code below).
box_pos = np.array([0.67852055, -0.14208797, 0.43913478])
# Global variable storing the pixel coordinates of the last mouse click.
clicked_pixels = None
'''
获取对齐图像帧与相机参数
'''
def get_aligned_images():
    """Grab one frame set, align depth to color, and return intrinsics + images.

    Returns:
        (color_intrin, depth_intrin, img_color, img_depth, aligned_depth_frame)
        where img_color is a BGR uint8 array, img_depth a float32 depth array,
        and the intrinsics objects come from the aligned stream profiles.
    """
    # Block until a coherent frame set arrives, then align depth to color.
    frameset = align.process(pipeline.wait_for_frames())
    depth_frame = frameset.get_depth_frame()
    color_frame = frameset.get_color_frame()
    # Intrinsics are needed later to deproject pixels into 3D camera points.
    depth_intrin = depth_frame.profile.as_video_stream_profile().intrinsics
    color_intrin = color_frame.profile.as_video_stream_profile().intrinsics
    # Expose the raw buffers as numpy arrays.
    img_color = np.asanyarray(color_frame.get_data())
    img_depth = np.asanyarray(depth_frame.get_data(), dtype=np.float32)
    return color_intrin, depth_intrin, img_color, img_depth, depth_frame
'''
获取像素点的三维坐标
'''
def get_3d_camera_coordinate(depth_pixel, aligned_depth_frame, depth_intrin):
    """Deproject one image pixel into a 3D point in the camera frame.

    Args:
        depth_pixel: (x, y) pixel coordinates in the aligned depth image.
        aligned_depth_frame: RealSense depth frame aligned to the color stream.
        depth_intrin: intrinsics of the aligned depth stream.

    Returns:
        (dis, camera_coordinate): depth in metres at the pixel, and the
        [x, y, z] point in camera coordinates.
    """
    px, py = depth_pixel[0], depth_pixel[1]
    # Depth reading at the pixel; unit is metres.
    dis = aligned_depth_frame.get_distance(px, py)
    # Pixel + depth -> 3D point via the pinhole model.
    camera_coordinate = rs.rs2_deproject_pixel_to_point(depth_intrin, depth_pixel, dis)
    return dis, camera_coordinate
# 将相机坐标系下的物体坐标转移至机械臂底座坐标系下
# Transform an object point from the camera frame into the robot base frame.
def transform_camera_to_base(camera_xyz, base_T_cam):
    """Map a 3D point from camera coordinates to robot-base coordinates.

    Args:
        camera_xyz: length-3 point in the camera frame.
        base_T_cam: 4x4 homogeneous transform from camera frame to base frame.

    Returns:
        np.ndarray of shape (3,): the point expressed in the base frame.
    """
    # Promote to homogeneous coordinates, apply the transform, drop the w term.
    homogeneous_point = np.append(camera_xyz, 1.0)
    return (base_T_cam @ homogeneous_point)[:3]
# OpenCV mouse callback: remember where the user last left-clicked.
def mouse_callback(event, x, y, flags, param):
    """Store the (x, y) pixel of a left-button click in `clicked_pixels`."""
    global clicked_pixels
    if event != cv2.EVENT_LBUTTONDOWN:
        return
    clicked_pixels = (x, y)
if __name__ == "__main__":
    # Create the display window and attach the mouse-click callback.
    cv2.namedWindow("Image")
    cv2.setMouseCallback("Image", mouse_callback)
    # Move to the initial position, then home the gripper.
    if panda.move_to_start():
        gripper.homing()
    # time.sleep(10)
    # End-effector orientation, held fixed for every grasp motion below.
    orientation = panda.get_orientation()
    while True:
        '''
        获取对齐图像帧与相机参数
        '''
        # Fetch aligned color/depth frames plus camera intrinsics.
        color_intrin, depth_intrin, img_color, img_depth, aligned_depth_frame = get_aligned_images()  # aligned images and camera parameters
        # Show the live color image.
        cv2.imshow("Image", img_color)
        '''
        获取像素点三维坐标
        '''
        if clicked_pixels is not None:
            # A grasp target was clicked; report the pixel.
            print('clicked_coordinates: ', clicked_pixels)
            # cv2.circle(img_color, clicked_pixels, 10, (0, 0, 255), -1)
            # cv2.imshow("Image", img_color)
            # x, y, z of the clicked point in the camera frame
            dis, camera_xyz = get_3d_camera_coordinate(clicked_pixels, aligned_depth_frame, depth_intrin)
            print('depth: %.6f m' % dis)  # depth unit is metres
            print('camera_xyz: ', camera_xyz)
            # NOTE(review): panda.get_pose() returns the end-effector pose in
            # the base frame (i.e. base_T_end); the chain below
            # (get_pose() @ end_T_cam -> base_T_cam) only works under that
            # reading, so the name `end_T_base` looks inverted — confirm.
            end_T_base = panda.get_pose()  # 4x4 homogeneous matrix
            # base_T_end = np.linalg.inv(end_T_base)
            base_T_cam = np.dot(end_T_base, end_T_cam)
            # x, y, z of the target in the robot base frame
            base_xyz = transform_camera_to_base(camera_xyz, base_T_cam)
            # print("base_xyz: ", base_xyz)
            # Compensate for the offset between gripper centre and flange.
            base_xyz[0] += x_offset
            base_xyz[1] += y_offset
            base_xyz[2] += z_offset
            print("base_xyz: ", base_xyz)
            # break
            """
            使用关节空间控制机械臂移动:
            - 通过position和orientation计算逆运动学参数,及关节运动参数
            - 判断结果是否含有nan,没有则执行
            - 问题: 该方法可以控制关节的运动速度,从而保证机械臂整体运动匀速,不会超过本身速度限制
            但计算得出的关节参数仅仅可以到达目的点,无法固定orientation来避免与桌面碰撞。
            因此可以通过moveit加入避障功能
            """
            # (Alternative joint-space control, kept for reference: solve IK,
            # check for NaN, then move_to_joint_position. It respects joint
            # speed limits but cannot hold a fixed orientation en route.)
            # q = panda_py.ik(position=base_xyz, orientation=orientation)
            # print("q: ", q)
            # if np.isnan(q).any():
            #     print(">>>> ik solve failure!!!!")
            #     clicked_pixels = None
            #     continue
            # finished = panda.move_to_joint_position(q, speed_factor=0.1)
            """
            使用笛卡尔空间控制机械臂运动:
            - 优点: 易于理解,可以通过固定orientatiion参数保证机械臂末端方向不变,
            某种程度上可以避免与桌面碰撞
            - 问题: 速度空间在笛卡尔空间定义,容易触发机械臂关节速度限制
            """
            # Cartesian-space motion to the grasp target, orientation fixed to
            # reduce the chance of hitting the table.
            finished = panda.move_to_pose(position=base_xyz, orientation=orientation,
                                          speed_factor=0.02,
                                          stiffness=np.array([600., 600., 600., 600., 250., 150., 50.]),
                                          damping=np.array([50., 50., 50., 20., 20., 20., 10.]),
                                          dq_threshold=0.001,
                                          success_threshold=0.10)
            time.sleep(1)  # pause to observe positioning accuracy
            if finished:
                print("finished: ", finished)
                if gripper.grasp(width=0.01, speed=0.2, force=20, epsilon_inner=0.04, epsilon_outer=0.04):
                    # time.sleep(1)  # pause to observe accuracy
                    if panda.move_to_start():
                        gripper.move(0.07, 0.2)  # open the gripper to release the object
                    # if panda.move_to_start():
                    #     if panda.move_to_pose(position=box_pos, orientation=orientation,
                    #                           speed_factor=0.05,
                    #                           stiffness=np.array([600., 600., 600., 600., 250., 150., 50.]),
                    #                           damping=np.array([50., 50., 50., 20., 20., 20., 10.]),
                    #                           dq_threshold=0.001,
                    #                           success_threshold=0.10):
                    #         if gripper.move(0.07, 0.2):
                    #             panda.move_to_start()
                else:
                    print("抓取失败!!!")
                    time.sleep(1)
                # panda.move_to_start()
                panda.move_to_pose(init_pose)
                # Reset the click so the loop waits for a new target.
                clicked_pixels = None
            else:
                # Motion did not finish: reset the click and return home.
                clicked_pixels = None
                # panda.move_to_start()
                panda.move_to_pose(init_pose)
        # else:
        #     continue
        # time.sleep(5)
        # Wait for a key event; ESC (27) exits the loop.
        key = cv2.waitKey(1) & 0xFF
        if key == 27:
            break
    cv2.destroyAllWindows()