Jan30 sub contact #1

Open · wants to merge 8 commits into base: sapien_mar24
Binary file removed 1-1-1.pcd
Binary file removed 1-3-1.pcd
Binary file removed 3-1-1.pcd
2 changes: 1 addition & 1 deletion automate.sh
@@ -2,7 +2,7 @@

rm ~/ocrtoc_ws/src/result.txt

tasks=('0-0' '1-1-1' '1-1-2' '1-1-3' '1-2-1' '1-2-2' '1-3-1' '1-4-1' '1-4-2' '1-5-1' '1-5-2' '1-5-3' '2-1-1' '2-1-2' '2-2-1' '2-2-2' '2-2-3' '2-2-4' '3-1-1' '3-1-2' '3-2-1' '3-2-2' '3-3-1' '3-3-2' '4-1-1' '4-1-2' '4-2-1' '4-2-2' '4-2-3' '4-2-4' '4-3-1' '4-3-2' '4-3-3' '4-3-4' '5-1-1' '5-1-2' '5-1-3' '5-2-1' '5-2-2' '5-3-1' '5-3-2' '5-3-3' '6-1-1' '6-1-2' '6-2-1' '6-2-2' '6-2-3' '6-3-1' '6-3-2' '6-3-3')
tasks=( '1-1-1' '1-1-2' '1-1-3' '1-1-1' '1-2-1' '1-2-2' '1-3-1' '1-4-1' '1-4-2' '1-5-1' '1-5-2' '1-5-3' '2-1-1' '2-1-2' '2-2-1' '2-2-2' '2-2-3' '2-2-4' '3-1-1' '3-1-2' '3-2-1' '3-2-2' '3-3-1' '3-3-2' '4-1-1' '4-1-2' '4-2-1' '4-2-2' '4-2-3' '4-2-4' '4-3-1' '4-3-2' '4-3-3' '4-3-4' '5-1-1' '5-1-2' '5-1-3' '5-2-1' '5-2-2' '5-3-1' '5-3-2' '5-3-3' '6-1-1' '6-1-2' '6-2-1' '6-2-2' '6-2-3' '6-3-1' '6-3-2' '6-3-3')

# tasks=('5-2-1' '5-3-1' '6-1-1' '6-2-1' '6-3-1')

Binary file modified contact_graspnet/grasps_temp.npy
Binary file modified contact_graspnet/results/predictions_temp.npz
Binary file modified contact_graspnet/temp.npy
Binary file modified contact_graspnet/temp_colors.npy
Binary file renamed 1-2-1.pcd → kin_pcd.pcd
12 changes: 8 additions & 4 deletions ocrtoc_perception/config/perception_franka_in_real.yaml
@@ -17,8 +17,9 @@ pose_method: superglue
reconstruction:
x_min: -0.20
y_min: -0.6
z_min: -0.05
x_max: 0.3
z_min: 0.0
# z_min: -0.05
x_max: 0.30
y_max: 0.6
z_max: 0.4
nb_neighbors: 50
@@ -79,12 +80,15 @@ superglue:
min_correspondence_thresh: 5

graspnet:
refine_approach_dist: 0.01
refine_approach_dist: 0.09

response:
dist_thresh: 0.1
angle_thresh: 20
mask_thresh: 0.5

debug: False
debug_pointcloud: True

contact_graspnet: True
6dof_graspnet: False
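
For context, a minimal sketch of how these settings are presumably consumed on the Python side, using PyYAML; the path and variable names here are illustrative, only the keys come from this diff:

import yaml

# load the perception config (path shown for illustration only)
with open('ocrtoc_perception/config/perception_franka_in_real.yaml') as f:
    config = yaml.safe_load(f)

# the two grasp-backend switches added in this change
use_contact_graspnet = config['contact_graspnet']   # True
use_6dof_graspnet = config['6dof_graspnet']         # False

# the approach-axis offset, retuned from 0.01 to 0.09 in this change
refine_dist = config['graspnet']['refine_approach_dist']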
Empty file modified ocrtoc_perception/run_contact_graspnet.sh
100644 → 100755
Empty file.
178 changes: 143 additions & 35 deletions ocrtoc_perception/src/ocrtoc_perception/perceptor.py
@@ -5,6 +5,7 @@
from numpy.core.numeric import full
import rospy
import rospkg
import random

# import tf.transformations

@@ -118,6 +119,10 @@ def __init__(
self.use_camera = self.config['use_camera']
self.arm_controller = ArmController(topic = self.config['arm_topic'])
self.camera_interface = CameraInterface()
self.graspnet_contact = self.config['contact_graspnet']
self.graspnet_6dof = self.config['6dof_graspnet']


rospy.sleep(2)
self.transform_interface = TransformInterface()
self.transform_from_frame = self.config['transform_from_frame']
@@ -134,6 +139,11 @@ def __init__(
time.sleep(2)
self.kinect_color_transform_to_frame = self.get_kinect_color_image_frame_id()
self.kinect_points_transform_to_frame = self.get_kinect_points_frame_id()


if self.graspnet_6dof:
self.config['reconstruction']['z_min'] = -0.0

self.fixed_arm_poses = np.loadtxt(
os.path.join(
rospkg.RosPack().get_path('ocrtoc_perception'),
@@ -209,6 +219,7 @@ def capture_data(self):
points_trans_matrix = self.get_kinect_points_transform_matrix()
full_pcd_kinect = self.kinect_get_pcd(use_graspnet_camera_frame = False) # in sapien frame.
full_pcd_kinect.transform(points_trans_matrix)
o3d.io.write_point_cloud("/root/ocrtoc_ws/src/kin_pcd.pcd", full_pcd_kinect)
full_pcd_kinect = kinect_process_pcd(full_pcd_kinect, self.config['reconstruction'])
if self.use_camera == 'both':
pcds.append(full_pcd_kinect)
@@ -220,7 +231,9 @@ def capture_data(self):
# cv2.waitKey(0)
# cv2.destroyAllWindows()
color_images.append(kinect_image)

# cv2.imwrite("/root/ocrtoc_ws/src/kin_image.png", cv2.cvtColor(kinect_image, cv2.COLOR_RGB2BGR))


if self.debug:
print('points_trans_matrix:', points_trans_matrix)
camera_poses.append(self.get_kinect_color_transform_matrix())
@@ -232,6 +245,8 @@ def capture_data(self):
arm_poses = np.array(self.fixed_arm_poses_both).tolist()
for arm_pose in arm_poses:
self.arm_controller.exec_joint_goal(arm_pose)


rospy.sleep(2.0)
time.sleep(1.0)
color_image = self.get_color_image()
@@ -267,7 +282,7 @@ def capture_data(self):
elif self.use_camera == 'both':
full_pcd = full_pcd_realsense
else:
raise ValueError('"use_camrera" should be "kinect", "realsense" or "both"')
raise ValueError('"use_camera" should be "kinect", "realsense" or "both"')
if self.debug:
t2 = time.time()
rospy.loginfo('Capturing data time:{}'.format(t2 - t1))
@@ -278,7 +293,7 @@ def capture_data(self):
def compute_6d_pose(self, full_pcd, color_images, camera_poses, pose_method, object_list):
camK = self.get_color_camK()

print('Camera Matrix:\n{}'.format(camK))
# print('Camera Matrix:\n{}'.format(camK))
if pose_method == 'icp':
if self.debug:
print('Using ICP to obtain 6d poses')
@@ -320,31 +335,33 @@ def compute_6d_pose(self, full_pcd, color_images, camera_poses, pose_method, obj
return object_poses

def compute_grasp_pose(self, full_pcd):
return self.compute_grasp_poses2(full_pcd)
# points, _ = o3dp.pcd2array(full_pcd)
# grasp_pcd = copy.deepcopy(full_pcd)
# grasp_pcd.points = o3d.utility.Vector3dVector(-points)

# # generating grasp poses.
# gg = self.graspnet_baseline.inference(grasp_pcd)
# gg.translations = -gg.translations
# gg.rotation_matrices = -gg.rotation_matrices
# gg.translations = gg.translations + gg.rotation_matrices[:, :, 0] * self.config['graspnet']['refine_approach_dist']
# gg = self.graspnet_baseline.collision_detection(gg, points)

points, _ = o3dp.pcd2array(full_pcd)
grasp_pcd = copy.deepcopy(full_pcd)
grasp_pcd.points = o3d.utility.Vector3dVector(-points)

# generating grasp poses.
gg = self.graspnet_baseline.inference(grasp_pcd)
gg.translations = -gg.translations
gg.rotation_matrices = -gg.rotation_matrices
gg.translations = gg.translations + gg.rotation_matrices[:, :, 0] * self.config['graspnet']['refine_approach_dist']
gg = self.graspnet_baseline.collision_detection(gg, points)

# print("Here are the grasp poses from the baseline {}".format(gg))

# # all the returned result in 'world' frame. 'gg' using 'graspnet' gripper frame.
# return gg
# all the returned result in 'world' frame. 'gg' using 'graspnet' gripper frame.
return gg
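
The refine_approach_dist line above slides each grasp along its own approach axis, which in this gripper convention is the first column of the rotation matrix; a standalone sketch with made-up numbers:

import numpy as np

R = np.eye(3)                  # gripper rotation; column 0 is the approach direction
t = np.array([0.5, 0.0, 0.2])  # grasp translation in the world frame
d = 0.09                       # refine_approach_dist after the yaml change above

t_refined = t + R[:, 0] * d    # gripper shifted 9 cm along its approach axis
print(t_refined)               # [0.59 0.   0.2 ]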

def compute_grasp_poses2(self, full_pcd):
def compute_grasp_pose2(self, full_pcd):
'''
Step 1: Call the contact graspnet API to generate the grasp poses
'''
random_value = random.randint(0, 10000)

# 1. Initialize all the paths and the data.
SAVE_PATH_NPZ = '/root/ocrtoc_ws/src/contact_graspnet/{}'.format('temp.npy')
SAVE_PATH_COLORS = '/root/ocrtoc_ws/src/contact_graspnet/{}'.format('temp_colors.npy')
GRASP_SAVE_PATH = '/root/ocrtoc_ws/src/contact_graspnet/{}'.format('grasps_temp.npy')
SAVE_PATH_NPZ = '/root/ocrtoc_ws/src/contact_graspnet/{}_{}.npy'.format('temp', random_value)
SAVE_PATH_COLORS = '/root/ocrtoc_ws/src/contact_graspnet/{}_{}.npy'.format('temp_colors', random_value)
GRASP_SAVE_PATH = '/root/ocrtoc_ws/src/contact_graspnet/{}_{}.npy'.format('grasps_temp', random_value)

full_pcd = copy.deepcopy(full_pcd)

@@ -371,7 +388,7 @@ def compute_grasp_poses2(self, full_pcd):
pred_grasps_cam = grasps['pred_grasps_cam'][-1]
grasp_scores = grasps['grasp_scores'][-1]

print(pred_grasps_cam.shape, grasp_scores.shape)
# print(pred_grasps_cam.shape, grasp_scores.shape)

'''
Step 2: Formatting the grasp poses into the baseline grasp pose patterns
@@ -405,7 +422,7 @@
tx, ty, tz = grasp_pose[:3, -1]

# 2. global transform part 2, here we shift the y axis a little to adjust with the baseline frame of reference.
grasp_pose[:3, -1] = np.array([tx, ty-0.04, tz])
grasp_pose[:3, -1] = np.array([tx, ty-0.038, tz])

# 3. assignment of the grasp array to make it consistent with the baseline class (GraspGroup)
# grasp_score (1), width (1), height (1), depth (1), rotation (9), translation (3), object_id (1)
@@ -419,9 +436,10 @@

grasps = np.vstack(grasps)

print('formatted shape: {}'.format(grasps.shape))
# print('formatted shape: {}'.format(grasps.shape))

gg = GraspGroup(grasps)
# gg.translations = gg.translations + gg.rotation_matrices[:, :, 0] * self.config['graspnet']['refine_approach_dist']

# 4. another transformation for axes change
ts = gg.translations
@@ -439,6 +457,83 @@

return gg, g_pose
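
For reference, the 17-float rows assembled above follow the layout that graspnetAPI's GraspGroup accepts; a self-contained sketch of the packing (the helper name pack_grasp_row is ours, the layout is the one in the comment above):

import numpy as np

def pack_grasp_row(pose, score, width=0.075, height=0.02, depth=0.02, object_id=-1.0):
    # layout: grasp_score (1), width (1), height (1), depth (1),
    #         rotation (9, row-major), translation (3), object_id (1)
    row = np.empty(17, dtype=np.float64)
    row[0] = score
    row[1:4] = (width, height, depth)
    row[4:13] = pose[:3, :3].reshape(-1)
    row[13:16] = pose[:3, 3]
    row[16] = object_id
    return row

# rows stacked the same way as above:
# gg = GraspGroup(np.vstack([pack_grasp_row(p, s) for p, s in zip(pred_grasps_cam, grasp_scores)]))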


def compute_grasp_poses3(self, full_pcd):
'''
Step 1: Call the contact graspnet API to generate the grasp poses
'''
# 1. Initialize all the paths and the data.
random_value = random.randint(0, 10000)

SAVE_PATH_NPZ = '/root/ocrtoc_ws/src/contact_graspnet/{}_{}.npy'.format('temp', random_value)
SAVE_PATH_COLORS = '/root/ocrtoc_ws/src/contact_graspnet/{}_{}.npy'.format('temp_colors', random_value)
GRASP_SAVE_PATH = '/root/ocrtoc_ws/src/contact_graspnet/{}_{}.npy'.format('grasps_temp', random_value)

points, _ = o3dp.pcd2array(full_pcd)
full_pcd_as_np = -(points.copy())
full_pcd_colors_as_np = np.asarray(full_pcd.colors)*255

np.save(SAVE_PATH_NPZ, full_pcd_as_np)
np.save(SAVE_PATH_COLORS, full_pcd_colors_as_np)

# 3. Run contact graspnet through the bash command
command = '/root/ocrtoc_ws/src/ocrtoc_perception/run_contact_graspnet.sh {} {} {}' \
.format(SAVE_PATH_NPZ, SAVE_PATH_COLORS, GRASP_SAVE_PATH)

print('Running Contact Graspnet')
os.system(command)

# 4. Load grasp poses generated by contact graspnet.
grasps = np.load(GRASP_SAVE_PATH, allow_pickle=True).item()

pred_grasps_cam = grasps['pred_grasps_cam'][-1]
grasp_scores = grasps['grasp_scores'][-1]

print(pred_grasps_cam.shape, grasp_scores.shape)

'''
Step 2: Formatting the grasp poses into the baseline grasp pose patterns
'''
grasps = []

g_pose = None

# following width, height, and depth values are taken from the baseline graspnet
width, height, depth = 0.07500000298023224, 0.019999999552965164, 0.019999999552965164

for grasp_pose, grasp_score in zip(pred_grasps_cam, grasp_scores):
grasp = np.array([grasp_score, width, height, depth, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, -1], dtype = np.float64)

frame = o3d.geometry.TriangleMesh.create_coordinate_frame()
frame.transform(grasp_pose)

Rg = frame.get_rotation_matrix_from_zyx((0, -np.pi/2, np.pi/2))

T = np.eye(4)
T[:3, :3] = Rg

grasp_pose = grasp_pose @ T

grasp[4:13] = grasp_pose[:3, :3].reshape(-1)
grasp[13:16] = grasp_pose[:3, -1].reshape(-1)

grasps.append(grasp)
g_pose = grasp_pose

grasps = np.vstack(grasps)

print('formatted shape: {}'.format(grasps.shape))

gg = GraspGroup(grasps)

gg.translations = -gg.translations
gg.rotation_matrices = -gg.rotation_matrices
gg.translations = gg.translations + gg.rotation_matrices[:, :, 0] * self.config['graspnet']['refine_approach_dist']
gg = self.graspnet_baseline.collision_detection(gg, points)

return gg, g_pose
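
The zyx re-axing used in both methods can be checked in isolation; a sketch, assuming open3d's module-level get_rotation_matrix_from_zyx is equivalent to the mesh static method called above:

import numpy as np
import open3d as o3d

# Rg = Rz(0) @ Ry(-pi/2) @ Rx(pi/2): maps a contact-graspnet pose into the
# gripper axis convention expected by the baseline GraspGroup
Rg = o3d.geometry.get_rotation_matrix_from_zyx(np.array([0.0, -np.pi / 2, np.pi / 2]))

T = np.eye(4)
T[:3, :3] = Rg

grasp_pose = np.eye(4)       # stand-in for one entry of pred_grasps_cam
grasp_pose = grasp_pose @ T  # right-multiplied, so it rotates about the grasp's own axes
print(np.round(grasp_pose[:3, :3], 3))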


# Converts a rotation matrix to Euler angles.
# The result is the same as MATLAB's, except the order
# of the Euler angles (x and z are swapped).
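
The function body is collapsed in this view; for reference, a sketch of the standard extraction the comment describes (not necessarily the exact collapsed code):

import numpy as np

def rotation_matrix_to_euler_angles(R):
    # standard decomposition; returns (x, y, z) with x and z swapped
    # relative to MATLAB's convention, as noted above
    sy = np.sqrt(R[0, 0] ** 2 + R[1, 0] ** 2)
    if sy > 1e-6:
        x = np.arctan2(R[2, 1], R[2, 2])
        y = np.arctan2(-R[2, 0], sy)
        z = np.arctan2(R[1, 0], R[0, 0])
    else:  # gimbal-lock fallback
        x = np.arctan2(-R[1, 2], R[1, 1])
        y = np.arctan2(-R[2, 0], sy)
        z = 0.0
    return np.array([x, y, z])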
@@ -486,7 +581,7 @@ def assign_grasp_pose2(self, gg, object_poses):
dists = np.linalg.norm(ts - object_pose['pose'][:3, 3], axis = 1)
object_mask = dists < dist_thresh

print('here is the gg[object_mask]: {}'.format(gg[object_mask]))
# print('here is the gg[object_mask]: {}'.format(gg[object_mask]))

min_object_ids[i] = object_mask

@@ -520,19 +615,19 @@ def assign_grasp_pose2(self, gg, object_poses):
top_i_ts = i_ts[top_indices]

top_i_euler = np.array([self.rotationMatrixToEulerAngles(r) for r in top_i_eelink_rs])
print('Top Eulers shape', top_i_euler.shape)
# print('Top Eulers shape', top_i_euler.shape)

# next, we want the poses with the lowest gravitational angle.
# we convert to euler; the ideal is (np.pi, 0). We sort according
# to the norm and then take the minimum of all the angles.
ideal_angle = np.array([np.pi, 0])
angles_scores = np.linalg.norm(ideal_angle - top_i_euler[:, :2], axis = 1)
print(" angle scores: {}".format(angles_scores))
print(" top i euler {}: ".format(top_i_euler[:, :2]))
# print(" angle scores: {}".format(angles_scores))
# print(" top i euler {}: ".format(top_i_euler[:, :2]))

smallest_index = np.argmin(angles_scores)
print("smallest index {}".format(smallest_index))
print('Best pose: {}'.format(top_i_euler[smallest_index]))
# print("smallest index {}".format(smallest_index))
# print('Best pose: {}'.format(top_i_euler[smallest_index]))

best_gg = top_i_gg[int(smallest_index)]
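
In short, this block keeps the top-scoring candidates, converts their end-effector rotations to Euler angles, and picks the grasp whose (roll, pitch) is nearest (pi, 0), i.e. the most downward-facing one; a condensed, vectorized sketch (pick_gravity_aligned is a hypothetical helper):

import numpy as np

def pick_gravity_aligned(rotations):
    # rotations: (N, 3, 3) stack of candidate end-effector rotations
    roll = np.arctan2(rotations[:, 2, 1], rotations[:, 2, 2])
    pitch = np.arctan2(-rotations[:, 2, 0],
                       np.sqrt(rotations[:, 0, 0] ** 2 + rotations[:, 1, 0] ** 2))
    angles = np.stack([roll, pitch], axis=1)
    scores = np.linalg.norm(angles - np.array([np.pi, 0.0]), axis=1)
    return int(np.argmin(scores))  # index of the most downward-facing grasp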

@@ -609,9 +704,9 @@ def assign_grasp_pose(self, gg, object_poses):
object_pose = object_poses[object_name]

dists = np.linalg.norm(ts - object_pose['pose'][:3,3], axis=1)
print('distances {}'.format(dists))
# print('distances {}'.format(dists))
object_mask = np.logical_and(dists < min_dists, dists < dist_thresh)
print('object mask {}'.format(object_mask))
# print('object mask {}'.format(object_mask))

min_object_ids[object_mask] = i
min_dists[object_mask] = dists[object_mask]
@@ -696,14 +791,26 @@ def percept(

o3d.io.write_point_cloud("/root/ocrtoc_ws/src/test.pcd", full_pcd)
# full_pcd = o3d.io.read_point_cloud("/root/ocrtoc_ws/src/test.pcd")
gg, t = self.compute_grasp_pose(full_pcd)
if self.debug_pointcloud:
print('g pose from the return function {}'.format(t))

if self.graspnet_contact:
gg, t = self.compute_grasp_poses3(full_pcd)
if self.graspnet_6dof:
gg = self.compute_grasp_pose(full_pcd)
if self.debug_pointcloud and self.graspnet_6dof:
# print('g pose from the return function {}'.format(t))
frame = o3d.geometry.TriangleMesh.create_coordinate_frame(0.1)
frame_grasp_pose = o3d.geometry.TriangleMesh.create_coordinate_frame(0.1)
# frame_grasp_pose.transform(t)
o3d.visualization.draw_geometries([frame, full_pcd, *gg.to_open3d_geometry_list(), frame_grasp_pose])

if self.debug_pointcloud and self.graspnet_contact:
# print('g pose from the return function {}'.format(t))
frame = o3d.geometry.TriangleMesh.create_coordinate_frame(0.1)
frame_grasp_pose = o3d.geometry.TriangleMesh.create_coordinate_frame(0.1)
frame_grasp_pose.transform(t)
o3d.visualization.draw_geometries([frame, full_pcd, *gg.to_open3d_geometry_list(), frame_grasp_pose])


# Compute Object 6d Poses
object_poses = self.compute_6d_pose(
full_pcd = full_pcd,
@@ -778,3 +885,4 @@ def get_response(self, object_list):
response_poses[object_name]['graspable'] = True
response_poses[object_name]['grasp_pose'] = grasp_poses[object_name]
return response_poses