2 changes: 1 addition & 1 deletion VERSION
@@ -1 +1 @@
2.6.0
2.18.0
9 changes: 2 additions & 7 deletions proto/tensorflow/core/example/example.proto
@@ -12,7 +12,6 @@ option java_multiple_files = true;
option java_package = "org.tensorflow.example";
option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/example";

// LINT.IfChange
// An Example is a mostly-normalized data format for storing data for
// training and inference. It contains a key-value store (features); where
// each key (string) maps to a Feature message (which is oneof packed BytesList,
@@ -22,10 +21,8 @@ option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/example
// read and write this format. That is, the Example is mostly *not* a
// self-describing format. In TensorFlow, Examples are read in row-major
// format, so any configuration that describes data with rank-2 or above
// should keep this in mind. For example, to store an M x N matrix of Bytes,
// the BytesList must contain M*N bytes, with M rows of N contiguous values
// each. That is, the BytesList value must store the matrix as:
// .... row 0 .... .... row 1 .... // ........... // ... row M-1 ....
// should keep this in mind. If you flatten a matrix into a FloatList it should
// be stored as [ row 0 ... row 1 ... row M-1 ]
//
// An Example for a movie recommendation application:
// features {
@@ -302,5 +299,3 @@ message SequenceExample {
Features context = 1;
FeatureLists feature_lists = 2;
}
// LINT.ThenChange(
// https://www.tensorflow.org/code/tensorflow/python/training/training.py)
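As a minimal sketch of the row-major flattening described in the comment above, the following Python snippet stores a 2 x 3 float matrix as a single flat FloatList of 6 values inside a tf.train.Example; the feature key "matrix" is purely illustrative and not part of the format.

import tensorflow as tf

matrix = [[1.0, 2.0, 3.0],
          [4.0, 5.0, 6.0]]                    # M = 2 rows, N = 3 columns
flat = [v for row in matrix for v in row]     # row-major: [ row 0 ... row 1 ]

example = tf.train.Example(features=tf.train.Features(feature={
    "matrix": tf.train.Feature(float_list=tf.train.FloatList(value=flat)),
}))
serialized = example.SerializeToString()      # bytes ready to write to a TFRecord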
2 changes: 1 addition & 1 deletion proto/tensorflow/core/framework/attr_value.proto
@@ -27,7 +27,7 @@ message AttrValue {
repeated TensorProto tensor = 8; // "list(tensor)"
repeated NameAttrList func = 9; // "list(attr)"
}
// LINT.ThenChange(https://www.tensorflow.org/code/tensorflow/c/c_api.cc)
// LINT.ThenChange(//tensorflow/c/c_api.cc)

oneof value {
bytes s = 2; // "string"
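For context, a short sketch of how the oneof value is populated, assuming the Python module generated from this file (tensorflow.core.framework.attr_value_pb2); only one member of the oneof may be set per message:

from tensorflow.core.framework import attr_value_pb2

int_attr = attr_value_pb2.AttrValue(i=3)         # "int"
str_attr = attr_value_pb2.AttrValue(s=b"SAME")   # "string" (bytes on the wire)
list_attr = attr_value_pb2.AttrValue(
    list=attr_value_pb2.AttrValue.ListValue(i=[1, 2, 3]))  # "list(int)"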
166 changes: 140 additions & 26 deletions proto/tensorflow/core/framework/full_type.proto
@@ -8,6 +8,7 @@ option java_multiple_files = true;
option java_package = "org.tensorflow.framework";
option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework";

// LINT.IfChange
// Experimental. Represents the complete type information of a TensorFlow value.
enum FullTypeId {
// The default represents an uninitialized value.
@@ -25,7 +26,7 @@ enum FullTypeId {
// TFT_TENSOR[TFT_VAR["T"]], TFT_TENSOR[TFT_VAR["T"]] are two tensors of
// identical element types.
// TFT_TENSOR[TFT_VAR["P"]], TFT_TENSOR[TFT_VAR["Q"]] are two tensors of
// potentially different element types.
// independent element types.
//
TFT_VAR = 1;

@@ -45,14 +46,53 @@ enum FullTypeId {
//
TFT_PRODUCT = 3;

// Represents a named field, with the name stored in the attribute.
//
// Parametrization:
// TFT_NAMED[<type>]{<name>}
// * <type> is the type of the field
// * <name> is the field name, as string (though can theoretically be an int
// as well)
//
// Example:
// TFT_RECORD[
// TFT_NAMED[TFT_TENSOR[TFT_INT32]]{'foo'},
// TFT_NAMED[TFT_TENSOR[TFT_FLOAT32]]{'bar'},
// ]
// is a structure with two fields, an int tensor "foo" and a float tensor
// "bar".
TFT_NAMED = 4;

// Template definition. Expands the variables by repeating a template as
// arguments of container.
//
// Parametrization:
// TFT_FOR_EACH[<container_type>, <template>, <expansions>]
// * <container_type> is the type of the container that the template will be
// expanded into
// * <template> is any type definition that potentially contains type
// variables
// * <expansions> is a TFT_VAR and may include more types in the future
//
// Example:
// TFT_FOR_EACH[
// TFT_PRODUCT,
// TFT_TENSOR[TFT_VAR["t"]],
// TFT_VAR["t"]
// ]
// will substitute a T = TFT_INT32 to TFT_PRODUCT[TFT_TENSOR[TFT_INT32]]
// and a T = (TFT_INT32, TFT_INT64) to
// TFT_PRODUCT[TFT_TENSOR[TFT_INT32], TFT_TENSOR[TFT_INT64]].
TFT_FOR_EACH = 20;
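As a concrete sketch of how these parametrized types nest, the following builds TFT_NAMED[TFT_TENSOR[TFT_INT32]]{'foo'} using the FullTypeDef message defined at the end of this file; the generated module path tensorflow.core.framework.full_type_pb2 is assumed.

from tensorflow.core.framework import full_type_pb2

# TFT_NAMED[TFT_TENSOR[TFT_INT32]]{'foo'}: the field name lives in the
# type's string attribute (the oneof attr of FullTypeDef).
named_foo = full_type_pb2.FullTypeDef(
    type_id=full_type_pb2.TFT_NAMED,
    s="foo",
    args=[full_type_pb2.FullTypeDef(
        type_id=full_type_pb2.TFT_TENSOR,
        args=[full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_INT32)],
    )],
)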

// Callable types describe functions and ops.
//
// Parametrization:
// TFT_CALLABLE[<arg type>, <return type>]
// * <arg_type> is the type of the arguments; TFT_PRODUCT represents
// * <arg type> is the type of the arguments; TFT_PRODUCT represents
// multiple
// arguments.
// * <return_type> is the return type; TFT_PRODUCT represents multiple
// * <return type> is the return type; TFT_PRODUCT represents multiple
// return values (that means that callables returning multiple things
// don't necessarily return a single tuple).
//
@@ -73,9 +113,9 @@ enum FullTypeId {
//
// Parametrization:
// TFT_TENSOR[<element type>, <shape type>]
// * <element_type> is currently limited to one of the element types
// * <element type> is currently limited to one of the element types
// defined below.
// * <shape_type> is not yet defined, and may only be TFT_UNKNOWN for now.
// * <shape type> is not yet defined, and may only be TFT_UNKNOWN for now.
//
// A TFT_SHAPE type will be defined in the future.
//
@@ -97,7 +137,7 @@ enum FullTypeId {
//
// Parametrization:
// TFT_ARRAY[<element type>]
// * <element_type> may be any concrete type.
// * <element type> may be any concrete type.
//
// Examples:
// TFT_ARRAY[TFT_TENSOR[TFT_INT32]] is a TensorArray holding int32 Tensors
@@ -115,35 +155,47 @@ enum FullTypeId {
//
// Parametrization:
// TFT_OPTIONAL[<element type>]
// * <element_type> may be any concrete type.
// * <element type> may be any concrete type.
//
// Examples:
// TFT_OPTIONAL[TFT_TENSOR[TFT_INT32]] is an Optional holding an int32
// Tensor of any shape.
TFT_OPTIONAL = 1002;

// Datasets created by tf.data ops and APIs. Datasets have generator/iterable
// semantics, that is, one can construct an iterator from them. Like
// Array, they are considered to return elements that can be described
// by a single type. Unlike Array, they do not support random access or
// mutation, and can potentially produce an infinite number of elements.
// A dataset can produce logical structures (e.g. multiple elements). This
// is expressed using TFT_PRODUCT.
// Literal types describe compile-time constant values.
// Literal types may also participate in dependent types.
//
// Parametrization:
// TFT_LITERAL[<value type>]{<value>}
// * <value type> may be any concrete type that can hold <value>
// * <value> is the type's attribute, and holds the actual literal value
//
// Examples:
// TFT_LITERAL[TFT_INT32]{1} is the compile-time constant 1.
TFT_LITERAL = 1003;

// Encoding types describe a value of a certain type, encoded as a different
// type.
//
// Parametrization: TFT_ARRAY[<element type>].
// <element_type> may be a concrete type or a type symbol. It represents the
// data type of the elements produced by the dataset.
// Parametrization:
// TFT_ENCODED[<encoded type>, <encoding type>]
// * <encoded type> may be any type
// * <encoding type> may be any type
//
// Examples:
// TFT_DATASET[TFT_TENSOR[TFT_INT32]] is a Dataset producing single int32
// Tensors of unknown shape.
// TFT_DATASET[TFT_PRODUCT[TFT_TENSOR[TFT_INT32], TFT_TENSOR[TFT_FLOAT32]]] is
// a
// Dataset producing pairs of Tensors, one integer and one float.
// Note: The high ID number is to prepare for the eventuality that Datasets
// will be supported by user types in the future.
TFT_DATASET = 10102;
// TFT_ENCODED[TFT_INT32, TFT_STRING] is an integer encoded as a string.
TFT_ENCODED = 1004;

// The type of "shape tensors" where the runtime value is the shape of
// some tensor(s), i.e. the output of tf.shape.
// Shape tensors have special, host-only placement, in contrast to
// TFT_TENSOR[TFT_INT32] which is the type of a normal numeric tensor
// with no special placement.
//
// Examples:
// TFT_SHAPE_TENSOR[TFT_INT32] is the most common
// TFT_SHAPE_TENSOR[TFT_INT64] is also allowed
TFT_SHAPE_TENSOR = 1005;
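Similarly, a short sketch (same assumed full_type_pb2 module) of TFT_LITERAL[TFT_INT32]{1}, where the literal value is carried in the int attribute of FullTypeDef:

from tensorflow.core.framework import full_type_pb2

# The compile-time constant 1: TFT_LITERAL[TFT_INT32]{1}.
literal_one = full_type_pb2.FullTypeDef(
    type_id=full_type_pb2.TFT_LITERAL,
    i=1,
    args=[full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_INT32)],
)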

// Type attributes. These always appear in the parametrization of a type,
// never alone. For example, there is no such thing as a "bool" TensorFlow
@@ -172,6 +224,65 @@ enum FullTypeId {
TFT_COMPLEX128 = 213;
// The string element type.
TFT_STRING = 214;

// Other types that we don't know yet whether they will become part of the
// core type system or be considered third-party (and consequently moved to
// user-defined type mechanisms). Presently, they are effectively in the core
// type system, because key compilation passes like Placer account for their
// existence.

// Datasets created by tf.data ops and APIs. Datasets have generator/iterable
// semantics, that is, one can construct an iterator from them. Like
// Array, they are considered to return elements that can be described
// by a single type. Unlike Array, they do not support random access or
// mutation, and can potentially produce an infinite number of elements.
// A dataset can produce logical structures (e.g. multiple elements). This
// is expressed using TFT_PRODUCT.
//
//
// Parametrization: TFT_DATASET[<element type>].
// * <element type> may be a concrete type or a type symbol. It represents
// the data type of the elements produced by the dataset.
//
// Examples:
// TFT_DATASET[TFT_TENSOR[TFT_INT32]] is a Dataset producing single int32
// Tensors of unknown shape.
// TFT_DATASET[TFT_PRODUCT[TFT_TENSOR[TFT_INT32], TFT_TENSOR[TFT_FLOAT32]]] is
// a Dataset producing pairs of Tensors, one integer and one float.
// Note: The high ID number is to prepare for the eventuality that Datasets
// will be supported by user types in the future.
TFT_DATASET = 10102;

// A ragged tensor created by tf.ragged ops and APIs.
//
// Parametrization: TFT_RAGGED[<element_type>].
TFT_RAGGED = 10103;

// Iterators created by tf.data ops and APIs. Very similar to Datasets, except
// they are mutable.
//
//
// Parametrization: TFT_ITERATOR[<element type>].
// * <element type> may be a concrete type or a type symbol. It represents
// the data type of the elements produced by the dataset.
TFT_ITERATOR = 10104;

// A mutex lock tensor, produced by tf.raw_ops.MutexLock.
// Unlike strict execution models, where ownership of a lock is denoted by
// "running after the lock has been acquired", in non-strict mode, lock
// ownership is in the true sense: "the op argument representing the lock is
// available".
// Mutex locks are the dynamic counterpart of control dependencies.
// TODO(mdan): Properly document this thing.
//
// Parametrization: TFT_MUTEX_LOCK[].
TFT_MUTEX_LOCK = 10202;

// The equivalent of a Tensor with DT_VARIANT dtype, kept here to simplify
// translation. This type should not normally appear after type inference.
// Note that LEGACY_VARIANT != ANY: TENSOR[INT32] is a subtype of ANY, but is
// not a subtype of LEGACY_VARIANT.
TFT_LEGACY_VARIANT = 10203;
}

// Highly experimental and very likely to change.
@@ -186,11 +297,14 @@ message FullTypeDef {

repeated FullTypeDef args = 2;

// Literal values of this type object, if the the type admits one.
// Literal values of this type object, if the type admits one.
// For example, a type variable admits a string attribute - its name.
// Shape-related types may admit int attributes - their static shape values.
// Fields for more data types to be added as needed.
oneof attr {
string s = 3;
int64 i = 4;
// TODO(mdan): list/tensor, map? Need to reconcile with TFT_RECORD, etc.
}
}
// LINT.ThenChange(../ir/types/attributes.td)
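To show how the nested args and the attr oneof combine, a sketch (assuming full_type_pb2 and the standard protobuf text_format module) that renders TFT_PRODUCT[TFT_TENSOR[TFT_VAR["T"]]] as textproto:

from google.protobuf import text_format
from tensorflow.core.framework import full_type_pb2

t = full_type_pb2.FullTypeDef(
    type_id=full_type_pb2.TFT_PRODUCT,
    args=[full_type_pb2.FullTypeDef(
        type_id=full_type_pb2.TFT_TENSOR,
        args=[full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_VAR, s="T")],
    )],
)
# Prints: type_id: TFT_PRODUCT args { type_id: TFT_TENSOR args { type_id: TFT_VAR s: "T" } }
print(text_format.MessageToString(t, as_one_line=True))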
4 changes: 4 additions & 0 deletions proto/tensorflow/core/framework/graph.proto
@@ -3,6 +3,7 @@ syntax = "proto3";
package tensorflow;

import "tensorflow/core/framework/function.proto";
import "tensorflow/core/framework/graph_debug_info.proto";
import "tensorflow/core/framework/node_def.proto";
import "tensorflow/core/framework/versions.proto";

@@ -53,4 +54,7 @@ message GraphDef {
// consumer does not start until all return values of the callee
// function are ready.
FunctionDefLibrary library = 2;

// Stack traces for the nodes in this graph.
GraphDebugInfo debug_info = 5;
}
61 changes: 61 additions & 0 deletions proto/tensorflow/core/framework/graph_debug_info.proto
@@ -0,0 +1,61 @@
syntax = "proto2";

package tensorflow;

option cc_enable_arenas = true;
option java_outer_classname = "GraphDebugInfoProtos";
option java_multiple_files = true;
option java_package = "org.tensorflow.framework";
option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf";

message GraphDebugInfo {
// This represents a file/line location in the source code.
message FileLineCol {
// File name index, which can be used to retrieve the file name string from
// `files`. The value should be between 0 and (len(files)-1)
optional int32 file_index = 1;

// Line number in the file.
optional int32 line = 2;

// Col number in the file line.
optional int32 col = 3;

// Name of the function that contains the file line.
optional string func = 4;

// Source code contained in this file line.
optional string code = 5;
}

// This represents a stack trace, which is an ordered list of `FileLineCol`.
message StackTrace {
repeated FileLineCol file_line_cols = 1; // Deprecated.
repeated fixed64 frame_id = 2 [packed = true];
}

// This stores all the source code file names and can be indexed by the
// `file_index`.
repeated string files = 1;

// Stack traces and frames are uniqueified during construction. These maps
// index from the unique id for a frame/trace to the value.
map<fixed64, FileLineCol> frames_by_id = 4;
map<fixed64, StackTrace> traces_by_id = 6;

map<string, StackTrace> traces = 2; // Deprecated.

// This maps a node name to a trace id contained in `traces_by_id`.
//
// The map key is a mangling of the containing function and op name with
// syntax:
// op.name '@' func_name
// For ops in the top-level graph, the func_name is the empty string and hence
// the `@` may be omitted.
// Note that op names are restricted to a small number of characters which
// exclude '@', making it impossible to collide keys of this form. Function
// names accept a much wider set of characters.
// It would be preferable to avoid mangling and use a tuple key of (op.name,
// func_name), but this is not supported with protocol buffers.
map<string, fixed64> name_to_trace_id = 5;
}
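A sketch of how a consumer would resolve a node's stack trace from these maps, assuming the Python module generated from this file (graph_debug_info_pb2); the op and function names are illustrative:

from tensorflow.core.framework import graph_debug_info_pb2

debug_info = graph_debug_info_pb2.GraphDebugInfo()  # normally taken from GraphDef.debug_info

def trace_key(op_name, func_name=""):
    # Mangling described above: op.name '@' func_name, with the '@' omitted
    # for ops in the top-level graph (empty func_name).
    return op_name if not func_name else op_name + "@" + func_name

key = trace_key("my_op")                             # illustrative op name
if key in debug_info.name_to_trace_id:
    trace = debug_info.traces_by_id[debug_info.name_to_trace_id[key]]
    for frame_id in trace.frame_id:                  # unique frame ids
        frame = debug_info.frames_by_id[frame_id]
        print("%s:%d (%s)" % (debug_info.files[frame.file_index], frame.line, frame.func))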
7 changes: 7 additions & 0 deletions proto/tensorflow/core/framework/node_def.proto
@@ -3,6 +3,7 @@ syntax = "proto3";
package tensorflow;

import "tensorflow/core/framework/attr_value.proto";
import "tensorflow/core/framework/full_type.proto";

option cc_enable_arenas = true;
option java_outer_classname = "NodeProto";
@@ -85,4 +86,10 @@ message NodeDef {

// This stores debug information associated with the node.
ExperimentalDebugInfo experimental_debug_info = 6;

// The complete type of this node. Experimental and subject to change.
// Currently, the field only contains the return types of the node. That will
// extend in the future to contain the entire signature of the node, as a
// function type.
FullTypeDef experimental_type = 7;
}
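A short sketch tying this field back to full_type.proto above, assuming the generated node_def_pb2 and full_type_pb2 modules: a node returning a single int32 tensor carries a TFT_PRODUCT wrapping one TFT_TENSOR.

from tensorflow.core.framework import full_type_pb2, node_def_pb2

node = node_def_pb2.NodeDef(name="x", op="Const")   # illustrative node
node.experimental_type.CopyFrom(full_type_pb2.FullTypeDef(
    type_id=full_type_pb2.TFT_PRODUCT,
    args=[full_type_pb2.FullTypeDef(
        type_id=full_type_pb2.TFT_TENSOR,
        args=[full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_INT32)],
    )],
))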
2 changes: 2 additions & 0 deletions proto/tensorflow/core/framework/resource_handle.proto
@@ -34,7 +34,9 @@ message ResourceHandleProto {

// Protocol buffer representing a pair of (data type, tensor shape).
message DtypeAndShape {
// Data type of the tensor.
DataType dtype = 1;
// Shape of the tensor.
TensorShapeProto shape = 2;
}
