|
7 | 7 | import numpy
|
8 | 8 | import os.path
|
9 | 9 |
|
| 10 | +from base import PerceptionModule, PerceptionMethod |
| 11 | + |
| 12 | + |
class VnccModule(PerceptionModule):
    """Perception module that detects objects using the VNCC ROS service
    and places the corresponding kinbody into the OpenRAVE environment."""

    def __init__(self, kinbody_path, detection_frame, world_frame,
                 service_namespace=None):
        """
        This initializes a VNCC detector.

        @param kinbody_path The path to the folder where kinbodies are stored
        @param detection_frame The TF frame of the camera
        @param world_frame The desired world TF frame
        @param service_namespace The namespace for the VNCC service (default: /vncc)
        """
        # Initialize a new ros node if one has not already been created
        try:
            rospy.init_node('vncc_detector', anonymous=True)
        except rospy.exceptions.ROSException:
            pass

        # For getting transforms in world frame
        if detection_frame is not None and world_frame is not None:
            self.listener = tf.TransformListener()
        else:
            self.listener = None

        if service_namespace is None:
            service_namespace = '/vncc'

        self.detection_frame = detection_frame
        self.world_frame = world_frame
        self.service_namespace = service_namespace

        self.kinbody_path = kinbody_path

        # A map of known objects that can be queried
        self.kinbody_to_query_map = {'plastic_plate': 'plate',
                                     'plastic_bowl': 'bowl'}

    @staticmethod
    def _MsgToPose(msg):
        """
        Parse the ROS message to a 4x4 pose format
        @param msg The ros message containing a pose
        @return A 4x4 transformation matrix containing the pose
        as read from the message
        """
        # Get translation and rotation (from Euler angles). The angles are
        # deliberately multiplied by 0.0: only the translation from VNCC is
        # trusted; the rotation is forced to identity downstream as well.
        pose = transformations.euler_matrix(msg.roll*0.0, msg.pitch*0.0, msg.yaw*0.0)

        pose[0,3] = msg.pt.x
        pose[1,3] = msg.pt.y
        pose[2,3] = msg.pt.z

        return pose

    def _LocalToWorld(self, pose):
        """
        Transform a pose from local frame to world frame
        @param pose The 4x4 transformation matrix containing the pose to transform
        @return The 4x4 transformation matrix describing the pose in world frame
        """
        # Get pose w.r.t world frame
        self.listener.waitForTransform(self.world_frame, self.detection_frame,
                                       rospy.Time(), rospy.Duration(10))
        t, r = self.listener.lookupTransform(self.world_frame, self.detection_frame,
                                             rospy.Time(0))

        # Get relative transform between frames
        offset_to_world = numpy.matrix(transformations.quaternion_matrix(r))
        offset_to_world[0,3] = t[0]
        offset_to_world[1,3] = t[1]
        offset_to_world[2,3] = t[2]

        # Compose with pose to get pose in world frame
        result = numpy.array(numpy.dot(offset_to_world, pose))

        return result

    def _GetDetection(self, obj_name):
        """
        Calls the service to get a detection of a particular object.
        @param obj_name The name of the object to detect
        @return A 4x4 transformation matrix describing the pose of the object
        in world frame, None if the object is not detected
        """
        # Call detection service for a particular object
        detect_vncc = rospy.ServiceProxy(self.service_namespace+'/get_vncc_detections',
                                         vncc_msgs.srv.GetDetections)

        detect_resp = detect_vncc(object_name=obj_name)

        # Guard against both a failed call and an empty detection list so we
        # return None (as documented) instead of raising IndexError below.
        if not detect_resp.ok or not detect_resp.detections:
            return None

        # Assumes one instance of object
        result = self._MsgToPose(detect_resp.detections[0])
        if (self.detection_frame is not None and self.world_frame is not None):
            result = self._LocalToWorld(result)
        # Only the translation from VNCC is kept; rotation is forced to identity.
        result[:3,:3] = numpy.eye(3)
        return result

    @PerceptionMethod
    def DetectObject(self, robot, obj_name, **kw_args):
        """
        Detect a single object
        @param robot The OpenRAVE robot
        @param obj_name The name of the object to detect
        @returns A kinbody placed in the detected location
        @throws PerceptionException if the object is not detected
        """
        from prpy.perception.base import PerceptionException

        if obj_name not in self.kinbody_to_query_map:
            # %-format eagerly: passing the argument separately to the
            # exception constructor would never interpolate the message.
            raise PerceptionException(
                'The VNCC module cannot detect object %s' % obj_name)

        query_name = self.kinbody_to_query_map[obj_name]
        obj_pose = self._GetDetection(query_name)
        if obj_pose is None:
            raise PerceptionException('Failed to detect object %s' % obj_name)

        env = robot.GetEnv()
        if env.GetKinBody(obj_name) is None:
            from prpy.rave import add_object
            kinbody_file = '%s.kinbody.xml' % obj_name
            add_object(
                env,
                obj_name,
                os.path.join(self.kinbody_path, kinbody_file))

        body = env.GetKinBody(obj_name)
        body.SetTransform(obj_pose)
        return body
0 commit comments