Skip to content

Commit 5ba36b4

Browse files
committed
Merge pull request #200 from personalrobotics/feature/vncc_integration
VNCC integration.
2 parents 5514d2c + 16eb655 commit 5ba36b4

File tree

3 files changed

+178
-27
lines changed

3 files changed

+178
-27
lines changed

src/prpy/perception/apriltags.py

Lines changed: 29 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -59,33 +59,11 @@ def __init__(self, marker_topic, marker_data_path, kinbody_path,
5959

6060
def __str__(self):
6161
return self.__class__.__name__
62+
6263

63-
def DetectObjects(self, env, object_names, **kw_args):
64-
"""
65-
This hack detects only the objects in object_names. Updates existing
66-
objects, but only adds objects in the object_names list.
67-
68-
@param env: The current OpenRAVE environment
69-
@param object_names: The list of names of objects to detect
70-
"""
71-
added_kinbodies, updated_kinbodies = self._DetectObjects(env, **kw_args);
72-
detected = [];
73-
for body in added_kinbodies:
74-
if not (body.GetName() in object_names):
75-
env.RemoveKinbody(body);
76-
else:
77-
detected.append(body);
78-
return detected;
79-
80-
def DetectObject(self, env, object_name, **kw_args):
81-
"""
82-
Detects a single named object.
83-
"""
84-
return (self._DetectObjects( env, object_names=[object_name], **kw_args))[0][0];
85-
86-
87-
def _DetectObjects(self, env, marker_topic, marker_data_path, kinbody_path,
88-
detection_frame, destination_frame,**kw_args):
64+
def _DetectObjects(self, env, marker_topic=None, marker_data_path=None,
65+
kinbody_path=None, detection_frame=None,
66+
destination_frame=None, **kw_args):
8967
"""
9068
Use the apriltags service to detect objects and add them to the
9169
environment. Params are as in __init__.
@@ -139,5 +117,29 @@ def DetectObjects(self, robot, **kw_args):
139117
"""
140118
Overriden method for detection_frame
141119
"""
142-
return self._DetectObjects(robot.GetEnv(),**kwargs)
120+
added_kinbodies, updated_kinbodies = self._DetectObjects(robot.GetEnv(), **kw_args)
121+
return added_kinbodies + updated_kinbodies
143122

123+
@PerceptionMethod
124+
def DetectObject(self, robot, object_name, **kw_args):
125+
"""
126+
Detects a single named object.
127+
"""
128+
added_bodies, updated_bodies = self._DetectObjects(env, **kw_args)
129+
130+
return_obj = None
131+
for obj in added_bodies:
132+
if obj.GetName() != object_name:
133+
env.Remove(obj)
134+
else:
135+
return_obj = obj
136+
for obj in updated_bodies:
137+
if obj.GetName() == object_name:
138+
return_obj = obj
139+
# TODO: Otherwise we need to put it back
140+
141+
if return_obj is not None:
142+
return return_obj
143+
144+
from prpy.perception.base import PerceptionException
145+
raise PerceptionException('Failed to detect object %s', object_name)

src/prpy/perception/base.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,9 @@
22

33
import functools
44

5+
class PerceptionException(Exception):
    """Raised when a perception module fails to detect a requested object."""
7+
58
class PerceptionMethod(object):
69

710
def __init__(self, func):

src/prpy/perception/vncc.py

Lines changed: 146 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,146 @@
1+
import rospy
2+
import vncc_msgs.srv
3+
import vncc_msgs.msg
4+
import math
5+
import tf
6+
import tf.transformations as transformations
7+
import numpy
8+
import os.path
9+
10+
from base import PerceptionModule, PerceptionMethod
11+
12+
13+
class VnccModule(PerceptionModule):
    """Perception module that detects objects via the VNCC ROS service."""

    def __init__(self, kinbody_path, detection_frame, world_frame,
                 service_namespace=None):
        """
        This initializes a VNCC detector.

        @param kinbody_path The path to the folder where kinbodies are stored
        @param detection_frame The TF frame of the camera
        @param world_frame The desired world TF frame
        @param service_namespace The namespace for the VNCC service
               (default: /vncc)
        """
        # Initialize a new ros node if one has not already been created
        try:
            rospy.init_node('vncc_detector', anonymous=True)
        except rospy.exceptions.ROSException:
            pass

        # For getting transforms in world frame; a listener is only needed
        # when both frames are known.
        if detection_frame is not None and world_frame is not None:
            self.listener = tf.TransformListener()
        else:
            self.listener = None

        if service_namespace is None:
            service_namespace = '/vncc'

        self.detection_frame = detection_frame
        self.world_frame = world_frame
        self.service_namespace = service_namespace
        self.kinbody_path = kinbody_path

        # A map of known objects that can be queried
        self.kinbody_to_query_map = {'plastic_plate': 'plate',
                                     'plastic_bowl': 'bowl'}

    @staticmethod
    def _MsgToPose(msg):
        """
        Parse the ROS message to a 4x4 pose format.

        @param msg The ROS message containing a pose
        @return A 4x4 transformation matrix containing the pose
                as read from the message
        """
        # NOTE(review): the Euler angles are deliberately multiplied by
        # zero, so the rotation part is identity -- only the translation
        # is trusted (the caller also overwrites the rotation later).
        pose = transformations.euler_matrix(msg.roll * 0.0,
                                            msg.pitch * 0.0,
                                            msg.yaw * 0.0)
        pose[0, 3] = msg.pt.x
        pose[1, 3] = msg.pt.y
        pose[2, 3] = msg.pt.z
        return pose

    def _LocalToWorld(self, pose):
        """
        Transform a pose from the local (detection) frame to world frame.

        @param pose The 4x4 transformation matrix containing the pose
               to transform
        @return The 4x4 transformation matrix describing the pose in
                world frame
        """
        # Get pose w.r.t world frame
        self.listener.waitForTransform(self.world_frame,
                                       self.detection_frame,
                                       rospy.Time(), rospy.Duration(10))
        t, r = self.listener.lookupTransform(self.world_frame,
                                             self.detection_frame,
                                             rospy.Time(0))

        # Get relative transform between frames
        offset_to_world = numpy.matrix(transformations.quaternion_matrix(r))
        offset_to_world[0, 3] = t[0]
        offset_to_world[1, 3] = t[1]
        offset_to_world[2, 3] = t[2]

        # Compose with pose to get pose in world frame
        return numpy.array(numpy.dot(offset_to_world, pose))

    def _GetDetection(self, obj_name):
        """
        Call the VNCC service to get a detection of a particular object.

        @param obj_name The name of the object to detect
        @return A 4x4 transformation matrix describing the pose of the
                object in world frame, None if the object is not detected
        """
        # Call detection service for a particular object
        detect_vncc = rospy.ServiceProxy(
            self.service_namespace + '/get_vncc_detections',
            vncc_msgs.srv.GetDetections)
        detect_resp = detect_vncc(object_name=obj_name)

        if not detect_resp.ok:
            return None

        # Assumes one instance of object
        result = self._MsgToPose(detect_resp.detections[0])
        if self.detection_frame is not None and self.world_frame is not None:
            result = self._LocalToWorld(result)
        # Rotation from the detector is not trusted; force identity.
        result[:3, :3] = numpy.eye(3)
        return result

    @PerceptionMethod
    def DetectObject(self, robot, obj_name, **kw_args):
        """
        Detect a single object and place its kinbody in the environment.

        @param robot The OpenRAVE robot
        @param obj_name The name of the object to detect
        @returns A kinbody placed in the detected location
        @throws PerceptionException if the object is not detected
        """
        from prpy.perception.base import PerceptionException

        if obj_name not in self.kinbody_to_query_map:
            # BUG FIX: use %-interpolation; a second exception argument
            # is never formatted into the message.
            raise PerceptionException(
                'The VNCC module cannot detect object %s' % obj_name)

        query_name = self.kinbody_to_query_map[obj_name]
        obj_pose = self._GetDetection(query_name)
        if obj_pose is None:
            raise PerceptionException(
                'Failed to detect object %s' % obj_name)

        env = robot.GetEnv()
        if env.GetKinBody(obj_name) is None:
            # Load the kinbody from disk on first detection.
            from prpy.rave import add_object
            kinbody_file = '%s.kinbody.xml' % obj_name
            add_object(env, obj_name,
                       os.path.join(self.kinbody_path, kinbody_file))

        body = env.GetKinBody(obj_name)
        body.SetTransform(obj_pose)
        return body

0 commit comments

Comments
 (0)