Skip to content

Commit 0f80e68

Browse files
committed
update tensorflow serving protos to 2.6.0
update .bazelrc embed label to 2.6.0
1 parent 056785b commit 0f80e68

31 files changed

+539
-165
lines changed

.bazelrc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
# upstream tensorflow/serving version
2-
build --embed_label=2.5.1
2+
build --embed_label=2.6.0
33

44
build --verbose_failures
55
# enable proper toolchain resolution for cc rules

proto/tensorflow/core/BUILD

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,13 +17,14 @@ proto_library(
1717
srcs = [
1818
"protobuf/bfc_memory_map.proto",
1919
"protobuf/cluster.proto",
20+
"protobuf/composite_tensor_variant.proto",
2021
"protobuf/config.proto",
2122
"protobuf/control_flow.proto",
23+
"protobuf/data_service.proto",
2224
"protobuf/debug.proto",
2325
"protobuf/debug_event.proto",
2426
"protobuf/device_filters.proto",
2527
"protobuf/device_properties.proto",
26-
"protobuf/extension_type_variant.proto",
2728
"protobuf/graph_debug_info.proto",
2829
"protobuf/meta_graph.proto",
2930
"protobuf/named_tensor.proto",

proto/tensorflow/core/framework/BUILD

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@ proto_library(
1313
":cost_graph_proto",
1414
":dataset_options_proto",
1515
":device_attributes_proto",
16+
":full_type_proto",
1617
":function_proto",
1718
":graph_proto",
1819
":graph_transfer_info_proto",
@@ -145,6 +146,7 @@ proto_library(
145146
strip_import_prefix = "/proto/",
146147
deps = [
147148
":attr_value_proto",
149+
":full_type_proto",
148150
":resource_handle_proto",
149151
":tensor_proto",
150152
":tensor_shape_proto",
@@ -159,6 +161,7 @@ proto_library(
159161
strip_import_prefix = "/proto/",
160162
deps = [
161163
":attr_value_proto",
164+
":full_type_proto",
162165
":resource_handle_proto",
163166
":tensor_proto",
164167
":tensor_shape_proto",
@@ -215,6 +218,7 @@ proto_library(
215218
strip_import_prefix = "/proto/",
216219
deps = [
217220
":attr_value_proto",
221+
":full_type_proto",
218222
":function_proto",
219223
":node_def_proto",
220224
":op_def_proto",
@@ -233,6 +237,7 @@ proto_library(
233237
strip_import_prefix = "/proto/",
234238
deps = [
235239
":attr_value_proto",
240+
":full_type_proto",
236241
":node_def_proto",
237242
":op_def_proto",
238243
":resource_handle_proto",
@@ -242,6 +247,13 @@ proto_library(
242247
],
243248
)
244249

250+
proto_library(
251+
name = "full_type_proto",
252+
srcs = ["full_type.proto"],
253+
go_package = "github.com/emacski/tensorflow-serving-arm-client/go/tensorflow/core/framework",
254+
strip_import_prefix = "/proto/",
255+
)
256+
245257
proto_library(
246258
name = "device_attributes_proto",
247259
srcs = ["device_attributes.proto"],

proto/tensorflow/core/framework/dataset_options.proto

Lines changed: 43 additions & 54 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,8 @@ syntax = "proto3";
22

33
package tensorflow.data;
44

5+
option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/dataset_options_go_proto";
6+
57
// Represents the type of auto-sharding we enable.
68
enum AutoShardPolicy {
79
// AUTO: Attempts FILE-based sharding, falling back to DATA-based sharding.
@@ -23,6 +25,27 @@ enum AutoShardPolicy {
2325
OFF = -1;
2426
}
2527

28+
// next: 4
29+
message AutotuneOptions {
30+
// Whether to automatically tune performance knobs.
31+
oneof optional_enabled {
32+
bool enabled = 1;
33+
}
34+
// When autotuning is enabled (through autotune), determines the CPU budget to
35+
// use. Values greater than the number of schedulable CPU cores are allowed
36+
// but may result in CPU contention.
37+
oneof optional_cpu_budget {
38+
int32 cpu_budget = 2;
39+
}
40+
// When autotuning is enabled (through autotune), determines the RAM budget to
41+
// use. Values greater than the available RAM in bytes may result in OOM. If
42+
// 0, defaults to half of the available RAM in bytes.
43+
oneof optional_ram_budget {
44+
int64 ram_budget = 3;
45+
}
46+
}
47+
48+
// next: 3
2649
message DistributeOptions {
2750
AutoShardPolicy auto_shard_policy = 1;
2851
// The number of devices attached to this input pipeline.
@@ -31,59 +54,25 @@ message DistributeOptions {
3154
}
3255
}
3356

34-
message MapVectorization {
35-
// Whether to vectorize map transformations.
36-
oneof optional_enabled {
37-
bool enabled = 1;
38-
}
39-
// Whether to use ChooseFastestBranchDataset with this transformation. If
40-
// True, the pipeline picks between the vectorized and original segment at
41-
// runtime based on their iterations speed.
42-
oneof optional_use_choose_fastest {
43-
bool use_choose_fastest = 2;
44-
}
45-
}
46-
57+
// next: 18
4758
message OptimizationOptions {
4859
// Whether to apply default graph optimizations. If False, only graph
4960
// optimizations that have been explicitly enabled will be applied.
5061
oneof optional_apply_default_optimizations {
5162
bool apply_default_optimizations = 1;
5263
}
53-
// Whether to automatically tune performance knobs.
54-
oneof optional_autotune {
55-
bool autotune = 2;
56-
}
57-
// When autotuning is enabled (through autotune), determines whether to also
58-
// autotune buffer sizes for datasets with parallelism.
59-
oneof optional_autotune_buffers {
60-
bool autotune_buffers = 3;
61-
}
62-
// When autotuning is enabled (through autotune), determines the CPU budget to
63-
// use. Values greater than the number of schedulable CPU cores are allowed
64-
// but may result in CPU contention.
65-
oneof optional_autotune_cpu_budget {
66-
int32 autotune_cpu_budget = 4;
67-
}
68-
// When autotuning is enabled (through autotune), determines the RAM budget to
69-
// use. Values greater than the available RAM in bytes may result in OOM. If
70-
// 0, defaults to half of the available RAM in bytes.
71-
oneof optional_autotune_ram_budget {
72-
int32 autotune_ram_budget = 5;
73-
}
64+
reserved 2;
65+
reserved 3;
66+
reserved 4;
67+
reserved 5;
7468
// Whether to fuse filter transformations.
7569
oneof optional_filter_fusion {
7670
bool filter_fusion = 6;
7771
}
78-
// Whether to fuse filter dataset that predicts random_uniform < rate into a
79-
// sampling dataset.
80-
oneof optional_filter_with_random_uniform_fusion {
81-
bool filter_with_random_uniform_fusion = 7;
82-
}
83-
// Whether to hoist tf.random_uniform() ops out of map transformations.
84-
oneof optional_hoist_random_uniform {
85-
bool hoist_random_uniform = 8;
86-
}
72+
// NOTE: field id 7 deleted in June 2021.
73+
reserved 7;
74+
// NOTE: field id 8 deleted in June 2021.
75+
reserved 8;
8776
// Whether to fuse map and batch transformations.
8877
oneof optional_map_and_batch_fusion {
8978
bool map_and_batch_fusion = 9;
@@ -100,8 +89,10 @@ message OptimizationOptions {
10089
oneof optional_map_parallelization {
10190
bool map_parallelization = 12;
10291
}
103-
// The map vectorization options associated with the dataset.
104-
MapVectorization map_vectorization = 13;
92+
93+
// NOTE: field id 13 deleted in June 2021.
94+
reserved 13;
95+
10596
// Whether to eliminate no-op transformations.
10697
oneof optional_noop_elimination {
10798
bool noop_elimination = 14;
@@ -115,21 +106,15 @@ message OptimizationOptions {
115106
oneof optional_parallel_batch {
116107
bool parallel_batch = 15;
117108
}
118-
// Whether to reorder ops that will discard data to the front of unary
119-
// cardinality preserving transformations, e.g. dataset.map(...).take(3) will
120-
// be optimized to dataset.take(3).map(...). For now this optimization will
121-
// move `skip`, `shard` and `take` to the front of `map` and `prefetch`. This
122-
// optimization is only for performance; it will not affect the output of the
123-
// dataset.
124-
oneof optional_reorder_data_discarding_ops {
125-
bool reorder_data_discarding_ops = 16;
126-
}
109+
// Field id 16 was removed in 06/2021.
110+
reserved 16;
127111
// Whether to fuse shuffle and repeat transformations.
128112
oneof optional_shuffle_and_repeat_fusion {
129113
bool shuffle_and_repeat_fusion = 17;
130114
}
131115
}
132116

117+
// next: 3
133118
message ThreadingOptions {
134119
// If set, it overrides the maximum degree of intra-op parallelism.
135120
oneof optional_max_intra_op_parallelism {
@@ -150,12 +135,16 @@ enum ExternalStatePolicy {
150135

151136
// Message stored with Dataset objects to control how datasets are processed and
152137
// optimized.
138+
//
139+
// next: 8
153140
message Options {
154141
// Whether the outputs need to be produced in deterministic order.
155142
oneof optional_deterministic {
156143
bool deterministic = 1;
157144
}
158145
// The autotune options associated with the dataset.
146+
AutotuneOptions autotune_options = 7;
147+
// The distribution strategy options associated with the dataset.
159148
DistributeOptions distribute_options = 2;
160149
// The optimization options associated with the dataset.
161150
OptimizationOptions optimization_options = 3;

proto/tensorflow/core/framework/device_attributes.proto

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -50,4 +50,9 @@ message DeviceAttributes {
5050

5151
// String representation of the physical device that this device maps to.
5252
string physical_device_desc = 7;
53+
54+
// A physical device ID for use in XLA DeviceAssignments, unique across
55+
// clients in a multi-client setup. Set to -1 if unavailable, non-negative
56+
// otherwise.
57+
int64 xla_global_id = 8;
5358
}

0 commit comments

Comments
 (0)