|
| 1 | +import math |
| 2 | +import numpy |
| 3 | +import os.path |
| 4 | + |
| 5 | +from base import PerceptionModule, PerceptionMethod |
| 6 | + |
| 7 | + |
class SimtrackModule(PerceptionModule):
    """
    Perception module that detects objects by calling the simtrack
    detector over a ROS service and places the corresponding OpenRAVE
    kinbodies at the detected poses.
    """

    def __init__(self, kinbody_path, detection_frame, world_frame,
                 service_namespace=None):
        """
        This initializes a simtrack detector.

        @param kinbody_path The path to the folder where kinbodies are stored
        @param detection_frame The TF frame of the camera
        @param world_frame The desired world TF frame
        @param service_namespace The namespace for the simtrack service
                                 (default: /simtrack)
        """
        import rospy
        import tf

        # Initialize a new ros node if one has not already been created
        try:
            rospy.init_node('simtrack_detector', anonymous=True)
        except rospy.exceptions.ROSException:
            # A node already exists in this process; reuse it.
            pass

        # For getting transforms in world frame; if either frame is missing
        # we skip the camera->world transform entirely (see _GetDetections).
        if detection_frame is not None and world_frame is not None:
            self.listener = tf.TransformListener()
        else:
            self.listener = None

        if service_namespace is None:
            service_namespace = '/simtrack'

        self.detection_frame = detection_frame
        self.world_frame = world_frame
        self.service_namespace = service_namespace

        self.kinbody_path = kinbody_path

        # A map of known objects that can be queried
        self.kinbody_to_query_map = {'fuze_bottle': 'fuze_bottle_visual',
                                     'pop_tarts': 'pop_tarts_visual'}
        # Inverse map, derived from the forward map so the two can never
        # fall out of sync.
        self.query_to_kinbody_map = dict(
            (query, kinbody)
            for kinbody, query in self.kinbody_to_query_map.items())

    @staticmethod
    def _MsgToPose(msg):
        """
        Parse the ROS message to a 4x4 pose format
        @param msg The ros message containing a pose
        @return A 4x4 transformation matrix containing the pose
                as read from the message
        """
        import tf.transformations as transformations
        # Rotation from the quaternion, then overwrite the translation column
        pose = transformations.quaternion_matrix(numpy.array([
            msg.pose.orientation.x, msg.pose.orientation.y,
            msg.pose.orientation.z, msg.pose.orientation.w]))
        pose[0, 3] = msg.pose.position.x
        pose[1, 3] = msg.pose.position.y
        pose[2, 3] = msg.pose.position.z
        return pose

    def _LocalToWorld(self, pose):
        """
        Transform a pose from local (camera) frame to world frame
        @param pose The 4x4 transformation matrix containing the pose to transform
        @return The 4x4 transformation matrix describing the pose in world frame
        """
        import rospy
        # BUGFIX: transformations was never imported in this scope, so the
        # quaternion_matrix call below raised NameError.
        import tf.transformations as transformations

        # Get pose of the detection frame w.r.t. the world frame
        self.listener.waitForTransform(self.world_frame, self.detection_frame,
                                       rospy.Time(), rospy.Duration(10))
        t, r = self.listener.lookupTransform(self.world_frame,
                                             self.detection_frame,
                                             rospy.Time(0))

        # Get relative transform between frames
        offset_to_world = numpy.matrix(transformations.quaternion_matrix(r))
        offset_to_world[0, 3] = t[0]
        offset_to_world[1, 3] = t[1]
        offset_to_world[2, 3] = t[2]

        # Compose with pose to get pose in world frame
        return numpy.array(numpy.dot(offset_to_world, pose))

    def _GetDetections(self, obj_names):
        """
        Calls the service to get detections of particular objects.
        @param obj_names List of query names of the objects to detect
                         (an empty list detects all known objects)
        @return A list of (name, pose) tuples, each pose a 4x4
                transformation matrix in world frame (camera frame when no
                detection/world frames were configured); objects that were
                not detected are simply absent from the list
        """
        # BUGFIX: rospy was only imported inside __init__, so the
        # ServiceProxy call below raised NameError.
        import rospy
        import simtrack_msgs.srv

        # Call detection service for the requested objects
        detect_simtrack = rospy.ServiceProxy(
            self.service_namespace + '/detect_objects',
            simtrack_msgs.srv.DetectObjects)
        detect_resp = detect_simtrack(obj_names, 5.0)

        detections = []
        # detected_models and detected_poses are parallel arrays
        for obj_name, obj_pose in zip(detect_resp.detected_models,
                                      detect_resp.detected_poses):
            obj_pose_tf = self._MsgToPose(obj_pose)
            if self.detection_frame is not None and self.world_frame is not None:
                obj_pose_tf = self._LocalToWorld(obj_pose_tf)
            detections.append((obj_name, obj_pose_tf))

        return detections

    @PerceptionMethod
    def DetectObject(self, robot, obj_name, **kw_args):
        """
        Detect a single object
        @param robot The OpenRAVE robot
        @param obj_name The name of the object to detect
        @returns A kinbody placed in the detected location
        @throws PerceptionException if the object is not detected
        """
        from prpy.perception.base import PerceptionException

        if obj_name not in self.kinbody_to_query_map:
            raise PerceptionException(
                'The simtrack module cannot detect object %s' % obj_name)

        query_name = self.kinbody_to_query_map[obj_name]
        # BUGFIX: the service expects a list of query names, not a bare
        # string (see DetectObjects, which passes a list).
        obj_poses = self._GetDetections([query_name])
        # BUGFIX: was `len(obj_poses is 0)`, a TypeError at runtime.
        if not obj_poses:
            raise PerceptionException('Failed to detect object %s' % obj_name)

        obj_pose = None
        for name, pose in obj_poses:
            # BUGFIX: compare string values with ==, not identity (`is`).
            if name == query_name:
                obj_pose = pose
                break

        if obj_pose is None:
            raise PerceptionException('Failed to detect object %s' % obj_name)

        env = robot.GetEnv()
        if env.GetKinBody(obj_name) is None:
            # Lazily load the kinbody the first time this object is seen
            from prpy.rave import add_object
            kinbody_file = '%s.kinbody.xml' % obj_name
            add_object(env, obj_name,
                       os.path.join(self.kinbody_path, kinbody_file))

        body = env.GetKinBody(obj_name)
        body.SetTransform(obj_pose)
        return body

    @PerceptionMethod
    def DetectObjects(self, robot, **kw_args):
        """
        Detect all known objects and place/update their kinbodies in the
        robot's environment at the detected poses.
        @param robot The OpenRAVE robot
        """
        from prpy.perception.base import PerceptionException
        env = robot.GetEnv()
        # Detecting with an empty list queries all possible objects
        detections = self._GetDetections([])

        for obj_name, obj_pose in detections:
            # Skip detections we have no kinbody mapping for
            if obj_name not in self.query_to_kinbody_map:
                continue

            kinbody_name = self.query_to_kinbody_map[obj_name]

            if env.GetKinBody(kinbody_name) is None:
                # Lazily load the kinbody the first time this object is seen
                from prpy.rave import add_object
                kinbody_file = '%s.kinbody.xml' % kinbody_name
                add_object(env, kinbody_name,
                           os.path.join(self.kinbody_path, kinbody_file))
                # BUGFIX: print function instead of the Python 2 print
                # statement (valid under both Python 2 and 3).
                print(kinbody_name)

            body = env.GetKinBody(kinbody_name)
            body.SetTransform(obj_pose)
| 189 | + |
| 190 | + |
0 commit comments