|
| 1 | +import rospy |
| 2 | +import vncc_msgs.srv |
| 3 | +import vncc_msgs.msg |
| 4 | +import math |
| 5 | +import tf |
| 6 | +import tf.transformations as transformations |
| 7 | +import numpy |
| 8 | +import os.path |
| 9 | + |
| 10 | +from base import PerceptionModule, PerceptionMethod |
| 11 | + |
| 12 | + |
class VnccModule(PerceptionModule):
    """
    Perception module that detects objects via the VNCC ROS service and
    places corresponding kinbodies into an OpenRAVE environment.
    """

    def __init__(self, kinbody_path, detection_frame, world_frame,
                 service_namespace=None):
        """
        This initializes a VNCC detector.

        @param kinbody_path The path to the folder where kinbodies are stored
        @param detection_frame The TF frame of the camera
        @param world_frame The desired world TF frame
        @param service_namespace The namespace for the VNCC service (default: /vncc)
        """
        # Initialize a new ros node if one has not already been created
        try:
            rospy.init_node('vncc_detector', anonymous=True)
        except rospy.exceptions.ROSException:
            # A node already exists in this process; reuse it.
            pass

        # TF listener is only needed when poses must be re-expressed in the
        # world frame; skip it if either frame is unspecified.
        if detection_frame is not None and world_frame is not None:
            self.listener = tf.TransformListener()
        else:
            self.listener = None

        if service_namespace is None:
            service_namespace = '/vncc'

        self.detection_frame = detection_frame
        self.world_frame = world_frame
        self.service_namespace = service_namespace

        self.kinbody_path = kinbody_path

        # A map of known objects that can be queried:
        # kinbody file name -> name the VNCC service expects.
        self.kinbody_to_query_map = {'plastic_plate': 'plate',
                                     'plastic_bowl': 'bowl'}

    @staticmethod
    def _MsgToPose(msg):
        """
        Parse the ROS message to a 4x4 pose format.

        NOTE: the roll/pitch/yaw from the message are deliberately multiplied
        by zero, so the returned pose has identity rotation and only the
        translation from the detection. (_GetDetection later also forces the
        rotation to identity, so VNCC orientation estimates are ignored.)

        @param msg The ros message containing a pose
        @return A 4x4 transformation matrix containing the pose
                as read from the message
        """
        # Identity rotation (angles zeroed on purpose), translation from msg.
        pose = transformations.euler_matrix(msg.roll * 0.0,
                                            msg.pitch * 0.0,
                                            msg.yaw * 0.0)
        pose[0, 3] = msg.pt.x
        pose[1, 3] = msg.pt.y
        pose[2, 3] = msg.pt.z

        return pose

    def _LocalToWorld(self, pose):
        """
        Transform a pose from local (detection/camera) frame to world frame.

        @param pose The 4x4 transformation matrix containing the pose to transform
        @return The 4x4 transformation matrix describing the pose in world frame
        """
        # Block (up to 10s) until the transform between the two frames is
        # available, then read the latest one.
        self.listener.waitForTransform(self.world_frame, self.detection_frame,
                                       rospy.Time(), rospy.Duration(10))
        t, r = self.listener.lookupTransform(self.world_frame,
                                             self.detection_frame,
                                             rospy.Time(0))

        # Build the camera->world transform from the quaternion + translation.
        offset_to_world = numpy.matrix(transformations.quaternion_matrix(r))
        offset_to_world[0, 3] = t[0]
        offset_to_world[1, 3] = t[1]
        offset_to_world[2, 3] = t[2]

        # Compose with pose to get pose in world frame.
        result = numpy.array(numpy.dot(offset_to_world, pose))

        return result

    def _GetDetection(self, obj_name):
        """
        Calls the service to get a detection of a particular object.

        @param obj_name The name of the object to detect (VNCC query name)
        @return A 4x4 transformation matrix describing the pose of the object
                in world frame, None if the object is not detected
        """
        # Call detection service for a particular object.
        detect_vncc = rospy.ServiceProxy(
            self.service_namespace + '/get_vncc_detections',
            vncc_msgs.srv.GetDetections)

        detect_resp = detect_vncc(object_name=obj_name)

        if not detect_resp.ok:
            return None

        # Assumes one instance of object; only the first detection is used.
        result = self._MsgToPose(detect_resp.detections[0])
        if self.detection_frame is not None and self.world_frame is not None:
            result = self._LocalToWorld(result)
        # Orientation from VNCC is not trusted: force identity rotation,
        # keeping only the detected translation.
        result[:3, :3] = numpy.eye(3)
        return result

    @PerceptionMethod
    def DetectObject(self, robot, obj_name, **kw_args):
        """
        Detect a single object and place its kinbody at the detected pose.

        @param robot The OpenRAVE robot
        @param obj_name The name of the object to detect
        @returns A kinbody placed in the detected location
        @throws PerceptionException if the object is not known or not detected
        """
        from prpy.perception.base import PerceptionException

        if obj_name not in self.kinbody_to_query_map:
            # Interpolate the name explicitly: Exception('%s', x) would leave
            # the placeholder unformatted.
            raise PerceptionException(
                'The VNCC module cannot detect object %s' % obj_name)

        query_name = self.kinbody_to_query_map[obj_name]
        obj_pose = self._GetDetection(query_name)
        if obj_pose is None:
            raise PerceptionException(
                'Failed to detect object %s' % obj_name)

        env = robot.GetEnv()
        if env.GetKinBody(obj_name) is None:
            # Lazily load the kinbody into the environment on first detection.
            from prpy.rave import add_object
            kinbody_file = '%s.kinbody.xml' % obj_name
            add_object(
                env,
                obj_name,
                os.path.join(self.kinbody_path, kinbody_file))

        body = env.GetKinBody(obj_name)
        body.SetTransform(obj_pose)
        return body
0 commit comments