5050from detectron .utils .logging import setup_logging
5151from detectron .utils .model_convert_utils import convert_op_in_proto
5252from detectron .utils .model_convert_utils import op_filter
53+ import detectron .utils .blob as blob_utils
5354import detectron .core .test_engine as test_engine
5455import detectron .utils .c2 as c2_utils
5556import detectron .utils .model_convert_utils as mutils
@@ -124,10 +125,41 @@ def unscope_name(name):
124125
125126
def reset_names(names):
    """Strip scope prefixes from every entry of *names*, mutating in place.

    Works on plain lists and on protobuf repeated-string fields (both
    support item assignment by index).
    """
    for i, name in enumerate(names):
        names[i] = unscope_name(name)
129130
130131
def convert_collect_and_distribute(
    op, blobs,
    roi_canonical_scale,
    roi_canonical_level,
    roi_max_level,
    roi_min_level,
    rpn_max_level,
    rpn_min_level,
    rpn_post_nms_topN,
):
    """Replace a Python CollectAndDistributeFpnRpnProposalsOp with the
    equivalent C++ CollectAndDistributeFpnRpnProposals operator.

    Args:
        op: the Python operator proto being converted; its inputs/outputs
            are carried over unchanged.
        blobs: workspace blob dict (unused here, but kept so this converter
            has the same signature shape as convert_gen_proposals).
        roi_*/rpn_*: FPN configuration values forwarded verbatim as
            operator arguments.

    Returns:
        The new C++ operator proto.
    """
    print('Converting CollectAndDistributeFpnRpnProposals'
          ' Python -> C++:\n{}'.format(op))
    assert op.name.startswith('CollectAndDistributeFpnRpnProposalsOp'), \
        'Not valid CollectAndDistributeFpnRpnProposalsOp'

    ret = core.CreateOperator(
        'CollectAndDistributeFpnRpnProposals',
        # list() instead of a copying comprehension: same result, clearer intent
        list(op.input),
        list(op.output),
        roi_canonical_scale=roi_canonical_scale,
        roi_canonical_level=roi_canonical_level,
        roi_max_level=roi_max_level,
        roi_min_level=roi_min_level,
        rpn_max_level=rpn_max_level,
        rpn_min_level=rpn_min_level,
        rpn_post_nms_topN=rpn_post_nms_topN,
    )
    return ret
161+
162+
def convert_gen_proposals(
    op, blobs,
    rpn_pre_nms_topN,
    rpn_post_nms_topN,
    rpn_nms_thresh,
    rpn_min_size,
):
    """Replace a Python GenerateProposalsOp with the C++ GenerateProposals op.

    Also materializes the anchor array the C++ op needs as a new constant
    blob, sized for the op's FPN level (or the single-scale RPN config when
    the model is not FPN).

    Args:
        op: the Python operator proto; input[0] is the cls-probs blob whose
            trailing digit (e.g. 'rpn_cls_probs_fpn2') encodes the FPN level.
        blobs: workspace blob dict; the anchors are inserted here.
        rpn_*: TEST-time proposal parameters forwarded to the operator.

    Returns:
        (new C++ operator proto, name of the added anchor blob).
    """
    print('Converting GenerateProposals Python -> C++:\n{}'.format(op))
    assert op.name.startswith('GenerateProposalsOp'), 'Not valid GenerateProposalsOp'

    spatial_scale = mutils.get_op_arg_valf(op, 'spatial_scale', None)
    assert spatial_scale is not None

    # A digit suffix on the first input marks the FPN pyramid level; a
    # non-digit suffix means the single-scale (non-FPN) case.
    lvl = int(op.input[0][-1]) if op.input[0][-1].isdigit() else None

    inputs = list(op.input)
    # Test `is not None`, not truthiness, so a level of 0 would not be
    # silently treated as the non-FPN case.
    if lvl is not None:
        anchor_name = 'anchor{}'.format(lvl)
        anchor_sizes = (
            cfg.FPN.RPN_ANCHOR_START_SIZE * 2. ** (lvl - cfg.FPN.RPN_MIN_LEVEL),
        )
    else:
        anchor_name = 'anchor'
        anchor_sizes = cfg.RPN.SIZES
    inputs.append(anchor_name)
    blobs[anchor_name] = get_anchors(spatial_scale, anchor_sizes)
    print('anchors {}'.format(blobs[anchor_name]))

    ret = core.CreateOperator(
        'GenerateProposals',
        inputs,
        list(op.output),
        spatial_scale=spatial_scale,
        pre_nms_topN=rpn_pre_nms_topN,
        post_nms_topN=rpn_post_nms_topN,
        nms_thresh=rpn_nms_thresh,
        min_size=rpn_min_size,
        correct_transform_coords=True,
    )
    return ret, anchor_name
163197
164198
def get_anchors(spatial_scale, anchor_sizes):
    """Build the float32 anchor array for one feature-map scale.

    The anchor stride is the inverse of *spatial_scale* (i.e. the feature
    stride in input pixels); aspect ratios come from the global RPN config.
    """
    return generate_anchors.generate_anchors(
        stride=1. / spatial_scale,
        sizes=anchor_sizes,
        aspect_ratios=cfg.RPN.ASPECT_RATIOS,
    ).astype(np.float32)
171205
@@ -188,36 +222,78 @@ def convert_op_name(op):
188222 reset_names (op .output )
189223 return [op ]
190224
191- @op_filter (type = "Python" , inputs = ['rpn_cls_probs' , 'rpn_bbox_pred' , 'im_info' ])
192- def convert_gen_proposal (op_in ):
193- gen_proposals_op , ext_input = convert_gen_proposals (
194- op_in , blobs ,
195- rpn_min_size = float (cfg .TEST .RPN_MIN_SIZE ),
196- rpn_post_nms_topN = cfg .TEST .RPN_POST_NMS_TOP_N ,
197- rpn_pre_nms_topN = cfg .TEST .RPN_PRE_NMS_TOP_N ,
198- rpn_nms_thresh = cfg .TEST .RPN_NMS_THRESH ,
199- )
200- net .external_input .extend ([ext_input ])
201- return [gen_proposals_op ]
225+ @op_filter (type = 'Python' )
226+ def convert_python (op ):
227+ if op .name .startswith ('GenerateProposalsOp' ):
228+ gen_proposals_op , ext_input = convert_gen_proposals (
229+ op , blobs ,
230+ rpn_min_size = float (cfg .TEST .RPN_MIN_SIZE ),
231+ rpn_post_nms_topN = cfg .TEST .RPN_POST_NMS_TOP_N ,
232+ rpn_pre_nms_topN = cfg .TEST .RPN_PRE_NMS_TOP_N ,
233+ rpn_nms_thresh = cfg .TEST .RPN_NMS_THRESH ,
234+ )
235+ net .external_input .extend ([ext_input ])
236+ return [gen_proposals_op ]
237+ elif op .name .startswith ('CollectAndDistributeFpnRpnProposalsOp' ):
238+ collect_dist_op = convert_collect_and_distribute (
239+ op , blobs ,
240+ roi_canonical_scale = cfg .FPN .ROI_CANONICAL_SCALE ,
241+ roi_canonical_level = cfg .FPN .ROI_CANONICAL_LEVEL ,
242+ roi_max_level = cfg .FPN .ROI_MAX_LEVEL ,
243+ roi_min_level = cfg .FPN .ROI_MIN_LEVEL ,
244+ rpn_max_level = cfg .FPN .RPN_MAX_LEVEL ,
245+ rpn_min_level = cfg .FPN .RPN_MIN_LEVEL ,
246+ rpn_post_nms_topN = cfg .TEST .RPN_POST_NMS_TOP_N ,
247+ )
248+ return [collect_dist_op ]
249+ else :
250+ raise ValueError ('Failed to convert Python op {}' .format (
251+ op .name ))
252+
    # Convert UpsampleNearest to ResizeNearest only during pb conversion so
    # that existing (pkl) models are left unchanged.
    # https://github.com/facebookresearch/Detectron/pull/372#issuecomment-410248561
255+ @op_filter (type = 'UpsampleNearest' )
256+ def convert_upsample_nearest (op ):
257+ for arg in op .arg :
258+ if arg .name == 'scale' :
259+ scale = arg .i
260+ break
261+ else :
262+ raise KeyError ('No attribute "scale" in UpsampleNearest op' )
263+ resize_nearest_op = core .CreateOperator ('ResizeNearest' ,
264+ list (op .input ),
265+ list (op .output ),
266+ name = op .name ,
267+ width_scale = float (scale ),
268+ height_scale = float (scale ))
269+ return resize_nearest_op
202270
203- @op_filter (input_has = 'rois' )
271+ @op_filter ()
204272 def convert_rpn_rois (op ):
205- for j in range (0 , len (op .input )):
273+ for j in range (len (op .input )):
206274 if op .input [j ] == 'rois' :
207275 print ('Converting op {} input name: rois -> rpn_rois:\n {}' .format (
208276 op .type , op ))
209277 op .input [j ] = 'rpn_rois'
278+ for j in range (len (op .output )):
279+ if op .output [j ] == 'rois' :
280+ print ('Converting op {} output name: rois -> rpn_rois:\n {}' .format (
281+ op .type , op ))
282+ op .output [j ] = 'rpn_rois'
210283 return [op ]
211284
    @op_filter(type_in=['StopGradient', 'Alias'])
    def convert_remove_op(op):
        # Returning an empty list drops the op from the net entirely;
        # StopGradient and Alias are training/bookkeeping ops not needed
        # in the exported inference graph.
        print('Removing op {}:\n{}'.format(op.type, op))
        return []
216289
    # Each pass runs as a separate full sweep over the proto so that it also
    # sees operators produced by the earlier passes (e.g. renaming applies to
    # the freshly created C++ ops as well).
    convert_op_in_proto(net, convert_remove_op)
    convert_op_in_proto(net, convert_upsample_nearest)
    convert_op_in_proto(net, convert_python)
    convert_op_in_proto(net, convert_op_name)
    convert_op_in_proto(net, convert_rpn_rois)

    # Strip scope prefixes from the net's declared inputs/outputs to match
    # the unscoped op blob names.
    reset_names(net.external_input)
    reset_names(net.external_output)
@@ -272,6 +348,7 @@ def convert_model_gpu(args, net, init_net):
272348 cdo_cpu = mutils .get_device_option_cpu ()
273349
274350 CPU_OPS = [
351+ ["CollectAndDistributeFpnRpnProposals" , None ],
275352 ["GenerateProposals" , None ],
276353 ["BBoxTransform" , None ],
277354 ["BoxWithNMSLimit" , None ],
@@ -424,10 +501,8 @@ def _prepare_blobs(
424501 im = cv2 .resize (im , None , None , fx = im_scale , fy = im_scale ,
425502 interpolation = cv2 .INTER_LINEAR )
426503
427- blob = np .zeros ([1 , im .shape [0 ], im .shape [1 ], 3 ], dtype = np .float32 )
428- blob [0 , :, :, :] = im
429- channel_swap = (0 , 3 , 1 , 2 ) # swap channel to (k, c, h, w)
430- blob = blob .transpose (channel_swap )
504+ # Reuse code in blob_utils and fit FPN
505+ blob = blob_utils .im_list_to_blob ([im ])
431506
432507 blobs = {}
433508 blobs ['data' ] = blob
@@ -462,7 +537,7 @@ def run_model_pb(args, net, init_net, im, check_blobs):
462537 )
463538
464539 try :
465- workspace .RunNet (net . Proto (). name )
540+ workspace .RunNet (net )
466541 scores = workspace .FetchBlob ('score_nms' )
467542 classids = workspace .FetchBlob ('class_nms' )
468543 boxes = workspace .FetchBlob ('bbox_nms' )
@@ -520,13 +595,16 @@ def main():
520595 merge_cfg_from_list (args .opts )
521596 cfg .NUM_GPUS = 1
522597 assert_and_infer_cfg ()
523- logger .info ('Conerting model with config:' )
598+ logger .info ('Converting model with config:' )
524599 logger .info (pprint .pformat (cfg ))
525600
526- assert not cfg .MODEL .KEYPOINTS_ON , "Keypoint model not supported."
527- assert not cfg .MODEL .MASK_ON , "Mask model not supported."
528- assert not cfg .FPN .FPN_ON , "FPN not supported."
529- assert not cfg .RETINANET .RETINANET_ON , "RetinaNet model not supported."
601+ # script will stop when it can't find an operator rather
602+ # than stopping based on these flags
603+ #
604+ # assert not cfg.MODEL.KEYPOINTS_ON, "Keypoint model not supported."
605+ # assert not cfg.MODEL.MASK_ON, "Mask model not supported."
606+ # assert not cfg.FPN.FPN_ON, "FPN not supported."
607+ # assert not cfg.RETINANET.RETINANET_ON, "RetinaNet model not supported."
530608
531609 # load model from cfg
532610 model , blobs = load_model (args )
0 commit comments