Skip to content

Commit 1d6f252

Browse files
committed
Updates compat proto copies by running our script to sync with TF protos.
1 parent 6201639 commit 1d6f252

File tree

8 files changed

+68
-20
lines changed

8 files changed

+68
-20
lines changed

tensorboard/compat/proto/BUILD

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -238,7 +238,6 @@ tb_proto_library(
238238
tb_proto_library(
239239
name = "summary",
240240
srcs = ["summary.proto"],
241-
exports = [":histogram"],
242241
deps = [
243242
":histogram",
244243
":tensor",

tensorboard/compat/proto/config.proto

Lines changed: 32 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -18,9 +18,9 @@ option java_package = "org.tensorflow.framework";
1818
option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto";
1919

2020
message GPUOptions {
21-
// Fraction of the available GPU memory to allocate for each process.
21+
// Fraction of the total GPU memory to allocate for each process.
2222
// 1 means to allocate all of the GPU memory, 0.5 means the process
23-
// allocates up to ~50% of the available GPU memory.
23+
// allocates up to ~50% of the total GPU memory.
2424
//
2525
// GPU memory is pre-allocated unless the allow_growth option is enabled.
2626
//
@@ -239,6 +239,19 @@ message GPUOptions {
239239
// hopes that another thread will free up memory in the meantime. Setting
240240
// this to true disables the sleep; instead we'll OOM immediately.
241241
bool disallow_retry_on_allocation_failure = 12;
242+
243+
// Memory limit for "GPU host allocator", aka pinned memory allocator. This
244+
// can also be set via the envvar TF_GPU_HOST_MEM_LIMIT_IN_MB.
245+
float gpu_host_mem_limit_in_mb = 13;
246+
247+
// If true, then the host allocator allocates its max memory all upfront and
248+
// never grows. This can be useful for latency-sensitive systems, because
249+
// growing the GPU host memory pool can be expensive.
250+
//
251+
// You probably only want to use this in combination with
252+
// gpu_host_mem_limit_in_mb, because the default GPU host memory limit is
253+
// quite high.
254+
bool gpu_host_mem_disallow_growth = 14;
242255
}
243256

244257
// Everything inside experimental is subject to change and is not subject
@@ -582,7 +595,8 @@ message ConfigProto {
582595
// If set, this can be used by the runtime and the Ops for debugging,
583596
// monitoring, etc.
584597
//
585-
// NOTE: This is currently used and propagated only by the direct session.
598+
// NOTE: This is currently used and propagated only by the direct session
599+
// and EagerContext.
586600
SessionMetadata session_metadata = 11;
587601

588602
// If true, the session may treat the graph as being static for optimization
@@ -615,18 +629,9 @@ message ConfigProto {
615629
MLIR_BRIDGE_ROLLOUT_ENABLED = 1;
616630
// Disabling the MLIR bridge disables it for all graphs in this session.
617631
MLIR_BRIDGE_ROLLOUT_DISABLED = 2;
618-
// Enable the MLIR bridge on a per graph basis based on an analysis of
619-
// the features used in the graph. If the features used by the graph are
620-
// supported by the MLIR bridge, the MLIR bridge will be used to run the
621-
// graph.
622-
MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED = 3;
623-
// Enable the MLIR bridge in a fallback mode on a per graph basis based
624-
// on an analysis of the features used in the graph.
625-
// Running the MLIR bridge in the fallback mode means that it is
626-
// executed and it commits all the changes to the TF graph in case
627-
// of success. In case of failure it makes no changes and leaves the old bridge
628-
// to process the TF graph.
629-
MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED = 4;
632+
reserved 3, 4;
633+
reserved "MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED",
634+
"MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED";
630635
}
631636
// Whether to enable the MLIR-based TF->XLA bridge.
632637
MlirBridgeRollout mlir_bridge_rollout = 17;
@@ -675,7 +680,18 @@ message ConfigProto {
675680
// Distributed coordination service configurations.
676681
CoordinationServiceConfig coordination_config = 23;
677682

678-
// Next: 24
683+
// If true, the session will treat the graph as being non-static for
684+
// optimization purposes.
685+
//
686+
// If this option is set to true when a session is created, the full
687+
// GraphDef will be retained to enable calls to Session::Extend().
688+
// Calling Extend() without setting this flag will result in errors.
689+
//
690+
// This option is meant to replace `optimize_for_static_graph`; its
691+
// semantics are the negation of that option's value.
692+
bool disable_optimize_for_static_graph = 24;
693+
694+
// Next: 25
679695
}
680696

681697
Experimental experimental = 16;

tensorboard/compat/proto/full_type.proto

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -186,6 +186,17 @@ enum FullTypeId {
186186
// TFT_ENCODING[TFT_INT32, TFT_STRING] is an integer encoded as string.
187187
TFT_ENCODED = 1004;
188188

189+
// The type of "shape tensors" where the runtime value is the shape of
190+
// some tensor(s), i.e. the output of tf.shape.
191+
// Shape tensors have special, host-only placement, in contrast to
192+
// TFT_TENSOR[TFT_INT32] which is the type of a normal numeric tensor
193+
// with no special placement.
194+
//
195+
// Examples:
196+
// TFT_SHAPE_TENSOR[TFT_INT32] is the most common
197+
// TFT_SHAPE_TENSOR[TFT_INT64] is also allowed
198+
TFT_SHAPE_TENSOR = 1005;
199+
189200
// Type attributes. These always appear in the parametrization of a type,
190201
// never alone. For example, there is no such thing as a "bool" TensorFlow
191202
// object (for now).

tensorboard/compat/proto/tensor.proto

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -83,6 +83,10 @@ message TensorProto {
8383

8484
// DT_UINT64
8585
repeated uint64 uint64_val = 17 [packed = true];
86+
87+
// DT_FLOAT8_*, use variable-sized set of bytes
88+
// (i.e. the equivalent of repeated uint8, if such a thing existed).
89+
bytes float8_val = 18;
8690
}
8791

8892
// Protocol buffer representing the serialization format of DT_VARIANT tensors.

tensorboard/compat/proto/tfprof_log.proto

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,12 @@
11
syntax = "proto3";
22

33
package tensorboard;
4-
option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/profiler/tfprof_log_go_proto";
54

65
import "tensorboard/compat/proto/attr_value.proto";
76
import "tensorboard/compat/proto/step_stats.proto";
87

8+
option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/profiler/protos_all_go_proto";
9+
910
// It specifies the Python callstack that creates an op.
1011
message CodeDef {
1112
repeated Trace traces = 1;

tensorboard/compat/proto/types.proto

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ enum DataType {
3030
DT_QINT8 = 11; // Quantized int8
3131
DT_QUINT8 = 12; // Quantized uint8
3232
DT_QINT32 = 13; // Quantized int32
33-
DT_BFLOAT16 = 14; // Float32 truncated to 16 bits. Only for cast ops.
33+
DT_BFLOAT16 = 14; // Float32 truncated to 16 bits.
3434
DT_QINT16 = 15; // Quantized int16
3535
DT_QUINT16 = 16; // Quantized uint16
3636
DT_UINT16 = 17;
@@ -40,6 +40,9 @@ enum DataType {
4040
DT_VARIANT = 21; // Arbitrary C++ data types
4141
DT_UINT32 = 22;
4242
DT_UINT64 = 23;
43+
DT_FLOAT8_E5M2 = 24; // 5 exponent bits, 2 mantissa bits.
44+
DT_FLOAT8_E4M3FN = 25; // 4 exponent bits, 3 mantissa bits, finite-only, with
45+
// 2 NaNs (0bS1111111).
4346

4447
// Do not use! These are only for parameters. Every enum above
4548
// should have a corresponding value below (verified by types_test).
@@ -66,6 +69,8 @@ enum DataType {
6669
DT_VARIANT_REF = 121;
6770
DT_UINT32_REF = 122;
6871
DT_UINT64_REF = 123;
72+
DT_FLOAT8_E5M2_REF = 124;
73+
DT_FLOAT8_E4M3FN_REF = 125;
6974
}
7075
// DISABLED.ThenChange(
7176
// https://www.tensorflow.org/code/tensorflow/c/tf_datatype.h,
553 Bytes
Binary file not shown.

tensorboard/data/server/tensorboard.pb.rs

Lines changed: 13 additions & 1 deletion
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments
 (0)