|
20 | 20 | import google.protobuf.json_format as json_format
|
21 | 21 | import os
|
22 | 22 |
|
23 |
| -from grpc import StatusCode |
| 23 | +from grpc import StatusCode, RpcError |
24 | 24 | from grpc.framework.interfaces.face.face import AbortionError
|
25 | 25 | from tensorflow.core.framework import tensor_pb2
|
26 | 26 | from tf_container import proxy_client
|
@@ -130,15 +130,26 @@ def _wait_model_to_load(grpc_proxy_client, max_seconds):
|
130 | 130 |
|
131 | 131 | logger.info("TF Serving model successfully loaded")
|
132 | 132 | return
|
133 |
| - except AbortionError as err: |
134 |
| - if err.code == StatusCode.UNAVAILABLE: |
135 |
| - logger.info("Waiting for TF Serving to load the model") |
136 |
| - time.sleep(1) |
| 133 | + except AbortionError as abort_err: |
| 134 | + if abort_err.code == StatusCode.UNAVAILABLE: |
| 135 | + _handle_rpc_exception(abort_err) |
| 136 | + # GRPC throws a _Rendezvous, which inherits from RpcError |
| 137 | + # _Rendezvous has a method for code instead of a parameter. |
| 138 | + # https://github.com/grpc/grpc/issues/9270 |
| 139 | + except RpcError as rpc_error: |
| 140 | + if rpc_error.code() == StatusCode.UNAVAILABLE: |
| 141 | + _handle_rpc_exception(rpc_error) |
137 | 142 |
|
138 | 143 | message = 'TF Serving failed to load the model under the maximum load time in seconds: {}'
|
139 | 144 | raise ValueError(message.format(max_seconds))
|
140 | 145 |
|
141 | 146 |
|
def _handle_rpc_exception(err, delay=1):
    """Log that TF Serving has not finished loading the model, then back off.

    Called from the model-load retry loop when gRPC reports UNAVAILABLE
    (both for ``AbortionError`` and ``RpcError``/_Rendezvous variants).

    Args:
        err: the exception indicating the model is not yet available;
            only its class name is logged.
        delay: seconds to sleep before the caller retries. Defaults to 1,
            preserving the original fixed backoff.
    """
    # Lazy %-style args: the message is only formatted if INFO is enabled.
    logger.info("Waiting for TF Serving to load the model due to %s",
                err.__class__.__name__)
    time.sleep(delay)
| 151 | + |
| 152 | + |
142 | 153 | class Transformer(object):
|
143 | 154 | """A ``Transformer`` encapsulates the function(s) responsible
|
144 | 155 | for parsing incoming request data, passing it through an and converting the result into something
|
|
0 commit comments