diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
new file mode 100644
index 00000000000..3d897574fb3
--- /dev/null
+++ b/.github/workflows/lint.yml
@@ -0,0 +1,14 @@
+name: pre-commit
+
+on:
+  pull_request:
+  push:
+    branches: [master]
+
+jobs:
+  pre-commit:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+      - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+      - uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd # v3.0.1
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 00000000000..6635c00d042
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,37 @@
+# pre-commit is a tool to perform a predefined set of tasks manually and/or
+# automatically before git commits are made.
+#
+# Config reference: https://pre-commit.com/#pre-commit-configyaml---top-level
+#
+# Common tasks
+#
+# - Register git hooks: pre-commit install
+# - Run on all files: pre-commit run --all-files
+#
+# These pre-commit hooks are run as CI.
+#
+# NOTE: if it can be avoided, add configs/args in pyproject.toml or below instead of creating a new `.config.file`.
+# https://pre-commit.ci/#configuration
+ci:
+  autoupdate_schedule: monthly
+  autofix_commit_msg: |
+    [pre-commit.ci] Apply automatic pre-commit fixes
+
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.6.0
+    hooks:
+      - id: end-of-file-fixer
+        exclude: '\.(svg|patch)$'
+      - id: trailing-whitespace
+        exclude: '\.(svg|patch)$'
+      - id: check-json
+      - id: check-yaml
+        args: [--allow-multiple-documents, --unsafe]
+      - id: check-toml
+
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.5.6
+    hooks:
+      - id: ruff
+        args: ["--fix"]
diff --git a/WORKSPACE b/WORKSPACE
index f6072b07552..4b43f14d27e 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -159,4 +159,3 @@ load(
 )
 
 nccl_configure(name = "local_config_nccl")
-
diff --git a/ruff.toml b/ruff.toml
new file mode 100644
index 00000000000..781ba95c1c3
--- /dev/null
+++ b/ruff.toml
@@ -0,0 +1,90 @@
+line-length = 88
+
+[lint]
+
+select = [
+    # pycodestyle
+    "E",
+    "W",
+    # Pyflakes
+    "F",
+    # pyupgrade
+    "UP",
+    # flake8-bugbear
+    "B",
+    # flake8-simplify
+    "SIM",
+    # isort
+    "I",
+    # pep8 naming
+    "N",
+    # pydocstyle
+    "D",
+    # annotations
+    "ANN",
+    # debugger
+    "T10",
+    # flake8-pytest
+    "PT",
+    # flake8-return
+    "RET",
+    # flake8-unused-arguments
+    "ARG",
+    # flake8-fixme
+    "FIX",
+    # flake8-eradicate
+    "ERA",
+    # pandas-vet
+    "PD",
+    # numpy-specific rules
+    "NPY",
+]
+
+ignore = [
+    "D104",  # Missing docstring in public package
+    "D100",  # Missing docstring in public module
+    "D211",  # No blank line before class
+    "PD901",  # Avoid using 'df' for pandas dataframes. Perfectly fine in functions with limited scope
+    "ANN201",  # Missing return type annotation for public function (makes no sense for NoneType return types...)
+ "ANN101", # Missing type annotation for `self` + "ANN204", # Missing return type annotation for special method + "ANN002", # Missing type annotation for `*args` + "ANN003", # Missing type annotation for `**kwargs` + "D105", # Missing docstring in magic method + "D203", # 1 blank line before after class docstring + "D204", # 1 blank line required after class docstring + "D413", # 1 blank line after parameters + "SIM108", # Simplify if/else to one line; not always clearer + "D206", # Docstrings should be indented with spaces; unnecessary when running ruff-format + "E501", # Line length too long; unnecessary when running ruff-format + "W191", # Indentation contains tabs; unnecessary when running ruff-format + + # FIX AND REMOVE BELOW CODES: + "ANN001", # Missing type annotation for function argument + "ANN102", # Missing type annotation for `cls` in classmethod + "ANN202", # Missing return type annotation for private function + "ANN205", # Missing return type annotation for staticmethod + "ANN206", # Missing return type annotation for classmethod + "D102", # Missing docstring in public method + "D103", # Missing docstring in public function + "D107", # Missing docstring in `__init__` + "E402", # Module level import not at top of file + "ERA001", # Found commented-out code + "F401", # `module` imported but unused + "F405", # Name may be undefined, or defined from star imports + "FIX002", # Line contains TODO, consider resolving the issue + "N802", # Function name should be lowercase + "PT009", # Use a regular `assert` instead of unittest-style `assertEqual` / `assertIsInstance` + "PT027", # Use `pytest.raises` instead of unittest-style `assertRaisesRegex` + "UP028", # Replace `yield` over `for` loop with `yield from` + "UP029", # Unnecessary builtin import + "RET503", # Missing explicit `return` at the end of function able to return non-`None` value +] + +[lint.pyupgrade] +# Preserve types, even if a file imports `from __future__ import annotations`. +# Remove when Python 3.9 is no longer supported +keep-runtime-typing = true + +[lint.pydocstyle] +convention = "google" diff --git a/tensorflow_serving/apis/model_service_pb2.py b/tensorflow_serving/apis/model_service_pb2.py index 76f1d606cb2..9d786855661 100644 --- a/tensorflow_serving/apis/model_service_pb2.py +++ b/tensorflow_serving/apis/model_service_pb2.py @@ -17,9 +17,9 @@ # To regenerate run # python -m grpc.tools.protoc --python_out=. --grpc_python_out=. -I. 
tensorflow_serving/apis/model_service.proto -import sys +import sys # noqa: I001 _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor as _descriptor # noqa: I001 from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database @@ -29,7 +29,7 @@ _sym_db = _symbol_database.Default() -from tensorflow_serving.apis import get_model_status_pb2 as tensorflow__serving_dot_apis_dot_get__model__status__pb2 +from tensorflow_serving.apis import get_model_status_pb2 as tensorflow__serving_dot_apis_dot_get__model__status__pb2 # noqa: I001 from tensorflow_serving.apis import model_management_pb2 as tensorflow__serving_dot_apis_dot_model__management__pb2 DESCRIPTOR = _descriptor.FileDescriptor( diff --git a/tensorflow_serving/apis/model_service_pb2_grpc.py b/tensorflow_serving/apis/model_service_pb2_grpc.py index 44c578b8648..aeaeb11c8f1 100644 --- a/tensorflow_serving/apis/model_service_pb2_grpc.py +++ b/tensorflow_serving/apis/model_service_pb2_grpc.py @@ -17,16 +17,16 @@ # To regenerate run # python -m grpc.tools.protoc --python_out=. --grpc_python_out=. -I. tensorflow_serving/apis/model_service.proto -import grpc +import grpc # noqa: I001 from tensorflow_serving.apis import get_model_status_pb2 as tensorflow__serving_dot_apis_dot_get__model__status__pb2 from tensorflow_serving.apis import model_management_pb2 as tensorflow__serving_dot_apis_dot_model__management__pb2 -class ModelServiceStub(object): +class ModelServiceStub(object): # noqa: UP004 """ModelService provides methods to query and update the state of the server, e.g. which models/versions are being served. - """ + """ # noqa: D205 def __init__(self, channel): """Constructor. @@ -50,26 +50,26 @@ def __init__(self, channel): ) -class ModelServiceServicer(object): +class ModelServiceServicer(object): # noqa: UP004 """ModelService provides methods to query and update the state of the server, e.g. which models/versions are being served. - """ + """ # noqa: D205 - def GetModelStatus(self, request, context): + def GetModelStatus(self, request, context): # noqa: ARG002 """Gets status of model. If the ModelSpec in the request does not specify version, information about all versions of the model will be returned. If the ModelSpec in the request does specify a version, the status of only that version will be returned. - """ + """ # noqa: D205 context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') - def HandleReloadConfigRequest(self, request, context): + def HandleReloadConfigRequest(self, request, context): # noqa: ARG002 """Reloads the set of served models. The new config supersedes the old one, so if a model is omitted from the new config it will be unloaded and no longer served. - """ + """ # noqa: D205 context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') diff --git a/tensorflow_serving/apis/prediction_service_pb2.py b/tensorflow_serving/apis/prediction_service_pb2.py index e51a700f720..a31bf10d2e8 100644 --- a/tensorflow_serving/apis/prediction_service_pb2.py +++ b/tensorflow_serving/apis/prediction_service_pb2.py @@ -21,9 +21,9 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
 # source: tensorflow_serving/apis/prediction_service.proto
-import sys
+import sys  # noqa: I001
 _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
-from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor as _descriptor  # noqa: I001
 from google.protobuf import message as _message
 from google.protobuf import reflection as _reflection
 from google.protobuf import symbol_database as _symbol_database
@@ -33,7 +33,7 @@
 _sym_db = _symbol_database.Default()
 
 
-from tensorflow_serving.apis import classification_pb2 as tensorflow__serving_dot_apis_dot_classification__pb2
+from tensorflow_serving.apis import classification_pb2 as tensorflow__serving_dot_apis_dot_classification__pb2  # noqa: I001
 from tensorflow_serving.apis import get_model_metadata_pb2 as tensorflow__serving_dot_apis_dot_get__model__metadata__pb2
 from tensorflow_serving.apis import inference_pb2 as tensorflow__serving_dot_apis_dot_inference__pb2
 from tensorflow_serving.apis import predict_pb2 as tensorflow__serving_dot_apis_dot_predict__pb2
diff --git a/tensorflow_serving/apis/prediction_service_pb2_grpc.py b/tensorflow_serving/apis/prediction_service_pb2_grpc.py
index 082f94a39a0..7b8e5af76a6 100644
--- a/tensorflow_serving/apis/prediction_service_pb2_grpc.py
+++ b/tensorflow_serving/apis/prediction_service_pb2_grpc.py
@@ -16,7 +16,7 @@
 # source: tensorflow_serving/apis/prediction_service.proto
 # To regenerate run
 # python -m grpc.tools.protoc --python_out=. --grpc_python_out=. -I. tensorflow_serving/apis/prediction_service.proto
-import grpc
+import grpc  # noqa: I001
 
 from tensorflow_serving.apis import classification_pb2 as tensorflow__serving_dot_apis_dot_classification__pb2
 from tensorflow_serving.apis import get_model_metadata_pb2 as tensorflow__serving_dot_apis_dot_get__model__metadata__pb2
@@ -25,11 +25,11 @@
 from tensorflow_serving.apis import regression_pb2 as tensorflow__serving_dot_apis_dot_regression__pb2
 
 
-class PredictionServiceStub(object):
+class PredictionServiceStub(object):  # noqa: UP004
  """open source marker; do not remove
  PredictionService provides access to machine-learned models loaded by
  model_servers.
-  """
+  """  # noqa: D205
 
  def __init__(self, channel):
    """Constructor.
@@ -64,43 +64,43 @@ def __init__(self, channel):
     )
 
 
-class PredictionServiceServicer(object):
+class PredictionServiceServicer(object):  # noqa: UP004
  """open source marker; do not remove
  PredictionService provides access to machine-learned models loaded by
  model_servers.
-  """
+  """  # noqa: D205
 
-  def Classify(self, request, context):
+  def Classify(self, request, context):  # noqa: ARG002
    """Classify.
-    """
+    """  # noqa: D200
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
 
-  def Regress(self, request, context):
+  def Regress(self, request, context):  # noqa: ARG002
    """Regress.
-    """
+    """  # noqa: D200
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
 
-  def Predict(self, request, context):
+  def Predict(self, request, context):  # noqa: ARG002
    """Predict -- provides access to loaded TensorFlow model.
- """ + """ # noqa: D200 context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') - def MultiInference(self, request, context): + def MultiInference(self, request, context): # noqa: ARG002 """MultiInference API for multi-headed models. - """ + """ # noqa: D200 context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') - def GetModelMetadata(self, request, context): + def GetModelMetadata(self, request, context): # noqa: ARG002 """GetModelMetadata - provides access to metadata for loaded models. - """ + """ # noqa: D200 context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') diff --git a/tensorflow_serving/batching/test_util/matrix_half_plus_two_saved_model.py b/tensorflow_serving/batching/test_util/matrix_half_plus_two_saved_model.py index d111459009c..75c4779c13f 100644 --- a/tensorflow_serving/batching/test_util/matrix_half_plus_two_saved_model.py +++ b/tensorflow_serving/batching/test_util/matrix_half_plus_two_saved_model.py @@ -13,7 +13,7 @@ # limitations under the License. # ============================================================================== -import tensorflow.compat.v1 as tf +import tensorflow.compat.v1 as tf # noqa: I001 FLAGS = tf.app.flags.FLAGS tf.app.flags.DEFINE_string("output_dir", "/tmp/matrix_half_plus_two/1", @@ -28,7 +28,7 @@ def _generate_saved_model_for_matrix_half_plus_two(export_dir): the result will be [[2.5, 3, 3.5], [4, 4.5, 5], [5.5, 6, 6.5]]. Args: export_dir: The directory where to write SavedModel files. - """ + """ # noqa: D205, D208, D214, D411 builder = tf.saved_model.builder.SavedModelBuilder(export_dir) with tf.Session() as session: x = tf.placeholder(tf.float32, shape=[None, 3, 3], name="x") diff --git a/tensorflow_serving/example/mnist_client.py b/tensorflow_serving/example/mnist_client.py index fd90af69b2d..8ab4d36f471 100644 --- a/tensorflow_serving/example/mnist_client.py +++ b/tensorflow_serving/example/mnist_client.py @@ -25,7 +25,7 @@ mnist_client.py --num_tests=100 --server=localhost:9000 """ -from __future__ import print_function +from __future__ import print_function # noqa: I001, UP010 import sys import threading @@ -48,7 +48,7 @@ FLAGS = tf.compat.v1.app.flags.FLAGS -class _ResultCounter(object): +class _ResultCounter(object): # noqa: UP004 """Counter for the prediction results.""" def __init__(self, num_tests, concurrency): @@ -94,7 +94,7 @@ def _create_rpc_callback(label, result_counter): result_counter: Counter for the prediction result. Returns: The callback function. - """ + """ # noqa: D410, D411 def _callback(result_future): """Callback function. diff --git a/tensorflow_serving/example/mnist_input_data.py b/tensorflow_serving/example/mnist_input_data.py index 3e8021a2435..894d3bbc481 100644 --- a/tensorflow_serving/example/mnist_input_data.py +++ b/tensorflow_serving/example/mnist_input_data.py @@ -17,7 +17,7 @@ """Functions for downloading and reading MNIST data.""" -from __future__ import print_function +from __future__ import print_function # noqa: UP010 import gzip import os @@ -42,7 +42,7 @@ def maybe_download(filename, work_directory): if not os.path.exists(filepath): filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath) statinfo = os.stat(filepath) - print('Successfully downloaded %s %d bytes.' 
% (filename, statinfo.st_size)) + print('Successfully downloaded %s %d bytes.' % (filename, statinfo.st_size)) # noqa: UP031 return filepath @@ -53,12 +53,12 @@ def _read32(bytestream): def extract_images(filename): """Extract the images into a 4D uint8 numpy array [index, y, x, depth].""" - print('Extracting %s' % filename) + print('Extracting %s' % filename) # noqa: UP031 with gzip.open(filename) as bytestream: magic = _read32(bytestream) if magic != 2051: raise ValueError( - 'Invalid magic number %d in MNIST image file: %s' % + 'Invalid magic number %d in MNIST image file: %s' % # noqa: UP031 (magic, filename)) num_images = _read32(bytestream) rows = _read32(bytestream) @@ -66,7 +66,7 @@ def extract_images(filename): buf = bytestream.read(rows * cols * num_images) data = numpy.frombuffer(buf, dtype=numpy.uint8) data = data.reshape(num_images, rows, cols, 1) - return data + return data # noqa: RET504 def dense_to_one_hot(labels_dense, num_classes=10): @@ -80,12 +80,12 @@ def dense_to_one_hot(labels_dense, num_classes=10): def extract_labels(filename, one_hot=False): """Extract the labels into a 1D uint8 numpy array [index].""" - print('Extracting %s' % filename) + print('Extracting %s' % filename) # noqa: UP031 with gzip.open(filename) as bytestream: magic = _read32(bytestream) if magic != 2049: raise ValueError( - 'Invalid magic number %d in MNIST label file: %s' % + 'Invalid magic number %d in MNIST label file: %s' % # noqa: UP031 (magic, filename)) num_items = _read32(bytestream) buf = bytestream.read(num_items) @@ -95,18 +95,18 @@ def extract_labels(filename, one_hot=False): return labels -class DataSet(object): +class DataSet(object): # noqa: UP004 """Class encompassing test, validation and training MNIST data set.""" def __init__(self, images, labels, fake_data=False, one_hot=False): - """Construct a DataSet. one_hot arg is used only if fake_data is true.""" + """Construct a DataSet. 
one_hot arg is used only if fake_data is true.""" # noqa: D202 if fake_data: self._num_examples = 10000 self.one_hot = one_hot else: assert images.shape[0] == labels.shape[0], ( - 'images.shape: %s labels.shape: %s' % (images.shape, + 'images.shape: %s labels.shape: %s' % (images.shape, # noqa: UP031 labels.shape)) self._num_examples = images.shape[0] @@ -157,7 +157,7 @@ def next_batch(self, batch_size, fake_data=False): self._epochs_completed += 1 # Shuffle the data perm = numpy.arange(self._num_examples) - numpy.random.shuffle(perm) + numpy.random.shuffle(perm) # noqa: NPY002 self._images = self._images[perm] self._labels = self._labels[perm] # Start next epoch @@ -171,7 +171,7 @@ def next_batch(self, batch_size, fake_data=False): def read_data_sets(train_dir, fake_data=False, one_hot=False): """Return training, validation and testing data sets.""" - class DataSets(object): + class DataSets(object): # noqa: UP004 pass data_sets = DataSets() diff --git a/tensorflow_serving/example/mnist_saved_model.py b/tensorflow_serving/example/mnist_saved_model.py index 0bb3053893c..685dcbaa1e9 100644 --- a/tensorflow_serving/example/mnist_saved_model.py +++ b/tensorflow_serving/example/mnist_saved_model.py @@ -25,7 +25,7 @@ export_dir """ -from __future__ import print_function +from __future__ import print_function # noqa: I001, UP010 import os import sys @@ -85,7 +85,7 @@ def main(_): train_step.run(feed_dict={x: batch[0], y_: batch[1]}) correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) accuracy = tf.math.reduce_mean(tf.cast(correct_prediction, 'float')) - print('training accuracy %g' % sess.run( + print('training accuracy %g' % sess.run( # noqa: UP031 accuracy, feed_dict={ x: mnist.test.images, y_: mnist.test.labels diff --git a/tensorflow_serving/example/resnet_client.py b/tensorflow_serving/example/resnet_client.py index b531166a91d..3a53be21023 100644 --- a/tensorflow_serving/example/resnet_client.py +++ b/tensorflow_serving/example/resnet_client.py @@ -27,7 +27,7 @@ resnet_client.py """ -from __future__ import print_function +from __future__ import print_function # noqa: I001, UP010 import base64 import io @@ -57,7 +57,7 @@ def main(): if MODEL_ACCEPT_JPG: # Compose a JSON Predict request (send JPEG image in base64). jpeg_bytes = base64.b64encode(dl_request.content).decode('utf-8') - predict_request = '{"instances" : [{"b64": "%s"}]}' % jpeg_bytes + predict_request = '{"instances" : [{"b64": "%s"}]}' % jpeg_bytes # noqa: UP031 else: # Compose a JOSN Predict request (send the image tensor). jpeg_rgb = Image.open(io.BytesIO(dl_request.content)) @@ -79,7 +79,7 @@ def main(): total_time += response.elapsed.total_seconds() prediction = response.json()['predictions'][0] - print('Prediction class: {}, avg latency: {} ms'.format( + print('Prediction class: {}, avg latency: {} ms'.format( # noqa: UP032 np.argmax(prediction), (total_time * 1000) / num_requests)) diff --git a/tensorflow_serving/example/resnet_client_grpc.py b/tensorflow_serving/example/resnet_client_grpc.py index 96e22ee8d2c..d1acc168fe2 100644 --- a/tensorflow_serving/example/resnet_client_grpc.py +++ b/tensorflow_serving/example/resnet_client_grpc.py @@ -14,9 +14,9 @@ # ============================================================================== """Send JPEG image to tensorflow_model_server loaded with ResNet model. 
-""" +""" # noqa: D200 -from __future__ import print_function +from __future__ import print_function # noqa: I001, UP010 import io @@ -71,7 +71,7 @@ def main(_): tf.make_tensor_proto(data)) result = stub.Predict(request, 10.0) # 10 secs timeout result = result.outputs['activation_49'].float_val - print('Prediction class: {}'.format(np.argmax(result))) + print('Prediction class: {}'.format(np.argmax(result))) # noqa: UP032 if __name__ == '__main__': diff --git a/tensorflow_serving/example/resnet_warmup.py b/tensorflow_serving/example/resnet_warmup.py index c7a6af151fb..2399abc59ff 100644 --- a/tensorflow_serving/example/resnet_warmup.py +++ b/tensorflow_serving/example/resnet_warmup.py @@ -29,9 +29,9 @@ Usage example: python resnet_warmup.py saved_model_dir -""" +""" # noqa: D208 -from __future__ import print_function +from __future__ import print_function # noqa: I001, UP010 import io import os @@ -64,7 +64,7 @@ def main(): model_dir = sys.argv[-1] if not os.path.isdir(model_dir): - print('The saved model directory: %s does not exist. ' + print('The saved model directory: %s does not exist. ' # noqa: UP031 'Specify the path of an existing model.' % model_dir) sys.exit(-1) @@ -100,7 +100,7 @@ def main(): predict_log=prediction_log_pb2.PredictLog(request=request)) writer.write(log.SerializeToString()) - print('Created the file \'%s\', restart tensorflow_model_server to warmup ' + print('Created the file \'%s\', restart tensorflow_model_server to warmup ' # noqa: UP031 'the ResNet SavedModel.' % warmup_file) if __name__ == '__main__': diff --git a/tensorflow_serving/experimental/example/half_plus_two_with_rpop.py b/tensorflow_serving/experimental/example/half_plus_two_with_rpop.py index 07fe685d139..db9986d77e1 100644 --- a/tensorflow_serving/experimental/example/half_plus_two_with_rpop.py +++ b/tensorflow_serving/experimental/example/half_plus_two_with_rpop.py @@ -27,7 +27,7 @@ RemotePredictOp. """ -import tensorflow.compat.v1 as tf +import tensorflow.compat.v1 as tf # noqa: I001 from tensorflow_serving.experimental.tensorflow.ops.remote_predict.python.ops import remote_predict_ops @@ -127,7 +127,7 @@ def main(_): FLAGS.target_address, FLAGS.remote_model_name) print( - "SavedModel generated at: %(dir)s with target_address: %(target_address)s" + "SavedModel generated at: %(dir)s with target_address: %(target_address)s" # noqa: UP031 ", remote_model_name: %(remote_model_name)s. " % { "dir": FLAGS.output_dir, "target_address": FLAGS.target_address, diff --git a/tensorflow_serving/experimental/example/half_plus_two_with_rpop_client.py b/tensorflow_serving/experimental/example/half_plus_two_with_rpop_client.py index 261c8a029dc..3028a6aafce 100644 --- a/tensorflow_serving/experimental/example/half_plus_two_with_rpop_client.py +++ b/tensorflow_serving/experimental/example/half_plus_two_with_rpop_client.py @@ -36,7 +36,7 @@ tensorflow_model_server --port=8500 --model_config_file=/tmp/config_file.txt """ -from __future__ import print_function +from __future__ import print_function # noqa: I001, UP010 import grpc import tensorflow.compat.v1 as tf diff --git a/tensorflow_serving/experimental/example/remote_predict_client.py b/tensorflow_serving/experimental/example/remote_predict_client.py index ccf98a12c2c..bce4c1cdae1 100644 --- a/tensorflow_serving/experimental/example/remote_predict_client.py +++ b/tensorflow_serving/experimental/example/remote_predict_client.py @@ -17,7 +17,7 @@ Example client code which calls the Remote Predict Op directly. 
""" -from __future__ import print_function +from __future__ import print_function # noqa: I001, UP010 import tensorflow.compat.v1 as tf @@ -40,7 +40,7 @@ FLAGS = tf.app.flags.FLAGS -def main(unused_argv): +def main(unused_argv): # noqa: ARG001 print("Call remote_predict_op") results = remote_predict_ops.run( [FLAGS.input_tensor_aliases], diff --git a/tensorflow_serving/experimental/tensorflow/ops/remote_predict/__init__.py b/tensorflow_serving/experimental/tensorflow/ops/remote_predict/__init__.py index c1597decea2..5aad2d5a424 100644 --- a/tensorflow_serving/experimental/tensorflow/ops/remote_predict/__init__.py +++ b/tensorflow_serving/experimental/tensorflow/ops/remote_predict/__init__.py @@ -16,9 +16,9 @@ @@run """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +from __future__ import absolute_import # noqa: I001, UP010 +from __future__ import division # noqa: UP010 +from __future__ import print_function # noqa: UP010 from tensorflow.python.util.all_util import remove_undocumented from tensorflow_serving.experimental.tensorflow.ops.remote_predict.python.ops.remote_predict_ops import run diff --git a/tensorflow_serving/experimental/tensorflow/ops/remote_predict/ops/remote_predict_op.cc b/tensorflow_serving/experimental/tensorflow/ops/remote_predict/ops/remote_predict_op.cc index f6cd7548e0d..cf4059725ad 100644 --- a/tensorflow_serving/experimental/tensorflow/ops/remote_predict/ops/remote_predict_op.cc +++ b/tensorflow_serving/experimental/tensorflow/ops/remote_predict/ops/remote_predict_op.cc @@ -92,7 +92,7 @@ fail_op_on_rpc_error: If set true, the Op fails if the rpc fails, and returns Set true by default. max_rpc_deadline_millis: The rpc deadline for remote predict. The actual deadline is min(incoming_rpc_deadline, max_rpc_deadline_millis). -signature_name: the signature def for remote graph inference, defaulting to +signature_name: the signature def for remote graph inference, defaulting to "serving_default". target_address: Address of the server hosting the remote graph. model_name: Model name of the remote TF graph. @@ -107,7 +107,7 @@ output_tensor_aliases: Tensor of strings for the output tensor alias names to status_code: Returns the status code of the rpc call; basically converting tensorflow::error::Code to it's int value, so 0 means OK. status_error_message: Returns the error message in the rpc status. -output_tensors: Tensors returned by the Predict call on the remote graph, which +output_tensors: Tensors returned by the Predict call on the remote graph, which are in the same order as output_tensor_aliases. output_types: A list of types of the output tensors. Length of this list should be equal to the length of 'output_tensor_aliases'. 
diff --git a/tensorflow_serving/experimental/tensorflow/ops/remote_predict/python/ops/remote_predict_ops.py b/tensorflow_serving/experimental/tensorflow/ops/remote_predict/python/ops/remote_predict_ops.py
index b2854503459..9e297b891ca 100644
--- a/tensorflow_serving/experimental/tensorflow/ops/remote_predict/python/ops/remote_predict_ops.py
+++ b/tensorflow_serving/experimental/tensorflow/ops/remote_predict/python/ops/remote_predict_ops.py
@@ -14,16 +14,16 @@
 # ==============================================================================
 """Operations for RemotePredict."""
 
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
+from __future__ import absolute_import  # noqa: I001, UP010
+from __future__ import division  # noqa: UP010
+from __future__ import print_function  # noqa: UP010
 
 import os.path
 
 import tensorflow.compat.v1 as tf
 
 from tensorflow_serving.experimental.tensorflow.ops.remote_predict.ops import gen_remote_predict_op
 # pylint: disable=wildcard-import
-from tensorflow_serving.experimental.tensorflow.ops.remote_predict.ops.gen_remote_predict_op import *
+from tensorflow_serving.experimental.tensorflow.ops.remote_predict.ops.gen_remote_predict_op import *  # noqa: F403
 # pylint: enable=wildcard-import
 
 _remote_predict_op_module = tf.load_op_library(
diff --git a/tensorflow_serving/g3doc/saved_model_warmup.md b/tensorflow_serving/g3doc/saved_model_warmup.md
index 1d4345ae758..838d0966ff8 100644
--- a/tensorflow_serving/g3doc/saved_model_warmup.md
+++ b/tensorflow_serving/g3doc/saved_model_warmup.md
@@ -48,5 +48,3 @@ Warmup data can be added in two ways:
     `YourSavedModel/assets.extra/tf_serving_warmup_requests` based on the
     validation requests provided via
     [RequestSpec](https://www.tensorflow.org/tfx/guide/infra_validator#requestspec).
-
-
diff --git a/tensorflow_serving/model_servers/profiler_client.py b/tensorflow_serving/model_servers/profiler_client.py
index c4573842980..28357d66da2 100644
--- a/tensorflow_serving/model_servers/profiler_client.py
+++ b/tensorflow_serving/model_servers/profiler_client.py
@@ -14,9 +14,9 @@
 # ==============================================================================
 """Simple client to send profiling request to ModelServer."""
 
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
+from __future__ import absolute_import  # noqa: I001, UP010
+from __future__ import division  # noqa: UP010
+from __future__ import print_function  # noqa: UP010
 
 import tensorflow as tf
 
diff --git a/tensorflow_serving/model_servers/tensorflow_model_server_test.py b/tensorflow_serving/model_servers/tensorflow_model_server_test.py
index 8ad8f91b153..3e7f5716212 100644
--- a/tensorflow_serving/model_servers/tensorflow_model_server_test.py
+++ b/tensorflow_serving/model_servers/tensorflow_model_server_test.py
@@ -15,9 +15,9 @@
 
 """Tests for tensorflow_model_server."""
 
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
+from __future__ import absolute_import  # noqa: I001, UP010
+from __future__ import division  # noqa: UP010
+from __future__ import print_function  # noqa: UP010
 
 import json
 import os
@@ -36,7 +36,7 @@
 
 
 
-import grpc
+import grpc  # noqa: I001
 from six.moves import range
 import tensorflow.compat.v1 as tf
 
@@ -74,7 +74,7 @@ def __BuildModelConfigFile(self):
    in the configuration template file and writes it out to another file
    used by the test.
""" - with open(self._GetGoodModelConfigTemplate(), 'r') as template_file: + with open(self._GetGoodModelConfigTemplate(), 'r') as template_file: # noqa: UP015 config = template_file.read().replace('${TEST_HALF_PLUS_TWO_DIR}', self._GetSavedModelBundlePath()) config = config.replace('${TEST_HALF_PLUS_THREE_DIR}', @@ -313,7 +313,7 @@ def testBadModelConfig(self): self.assertGreater(proc.stderr.read().find(error_message), -1) def testModelConfigReload(self): - """Test model server polls filesystem for model configuration.""" + """Test model server polls filesystem for model configuration.""" # noqa: D202 base_config_proto = """ model_config_list: {{ @@ -367,7 +367,7 @@ def testModelConfigReload(self): self._GetSavedModelHalfPlusThreePath())) def testModelConfigReloadWithZeroPollPeriod(self): - """Test model server does not poll filesystem for model config.""" + """Test model server does not poll filesystem for model config.""" # noqa: D202 base_config_proto = """ model_config_list: {{ @@ -442,7 +442,7 @@ def testClassifyREST(self): model_path)[2].split(':') # Prepare request - url = 'http://{}:{}/v1/models/default:classify'.format(host, port) + url = 'http://{}:{}/v1/models/default:classify'.format(host, port) # noqa: UP032 json_req = {'signature_name': 'classify_x_to_y', 'examples': [{'x': 2.0}]} # Send request @@ -450,7 +450,7 @@ def testClassifyREST(self): try: resp_data = tensorflow_model_server_test_base.CallREST(url, json_req) except Exception as e: # pylint: disable=broad-except - self.fail('Request failed with error: {}'.format(e)) + self.fail('Request failed with error: {}'.format(e)) # noqa: UP032 # Verify response self.assertEqual(json.loads(resp_data.decode()), {'results': [[['', 3.0]]]}) @@ -462,7 +462,7 @@ def testRegressREST(self): model_path)[2].split(':') # Prepare request - url = 'http://{}:{}/v1/models/default:regress'.format(host, port) + url = 'http://{}:{}/v1/models/default:regress'.format(host, port) # noqa: UP032 json_req = {'signature_name': 'regress_x_to_y', 'examples': [{'x': 2.0}]} # Send request @@ -470,7 +470,7 @@ def testRegressREST(self): try: resp_data = tensorflow_model_server_test_base.CallREST(url, json_req) except Exception as e: # pylint: disable=broad-except - self.fail('Request failed with error: {}'.format(e)) + self.fail('Request failed with error: {}'.format(e)) # noqa: UP032 # Verify response self.assertEqual(json.loads(resp_data.decode()), {'results': [3.0]}) @@ -482,7 +482,7 @@ def testPredictREST(self): model_path)[2].split(':') # Prepare request - url = 'http://{}:{}/v1/models/default:predict'.format(host, port) + url = 'http://{}:{}/v1/models/default:predict'.format(host, port) # noqa: UP032 json_req = {'instances': [2.0, 3.0, 4.0]} # Send request @@ -490,7 +490,7 @@ def testPredictREST(self): try: resp_data = tensorflow_model_server_test_base.CallREST(url, json_req) except Exception as e: # pylint: disable=broad-except - self.fail('Request failed with error: {}'.format(e)) + self.fail('Request failed with error: {}'.format(e)) # noqa: UP032 # Verify response self.assertEqual( @@ -503,7 +503,7 @@ def testPredictColumnarREST(self): model_path)[2].split(':') # Prepare request - url = 'http://{}:{}/v1/models/default:predict'.format(host, port) + url = 'http://{}:{}/v1/models/default:predict'.format(host, port) # noqa: UP032 json_req = {'inputs': [2.0, 3.0, 4.0]} # Send request @@ -511,7 +511,7 @@ def testPredictColumnarREST(self): try: resp_data = tensorflow_model_server_test_base.CallREST(url, json_req) except Exception as e: # pylint: 
disable=broad-except - self.fail('Request failed with error: {}'.format(e)) + self.fail('Request failed with error: {}'.format(e)) # noqa: UP032 # Verify response self.assertEqual( @@ -524,14 +524,14 @@ def testGetStatusREST(self): model_path)[2].split(':') # Prepare request - url = 'http://{}:{}/v1/models/default'.format(host, port) + url = 'http://{}:{}/v1/models/default'.format(host, port) # noqa: UP032 # Send request resp_data = None try: resp_data = tensorflow_model_server_test_base.CallREST(url, None) except Exception as e: # pylint: disable=broad-except - self.fail('Request failed with error: {}'.format(e)) + self.fail('Request failed with error: {}'.format(e)) # noqa: UP032 # Verify response self.assertEqual( @@ -553,14 +553,14 @@ def testGetModelMetadataREST(self): model_path)[2].split(':') # Prepare request - url = 'http://{}:{}/v1/models/default/metadata'.format(host, port) + url = 'http://{}:{}/v1/models/default/metadata'.format(host, port) # noqa: UP032 # Send request resp_data = None try: resp_data = tensorflow_model_server_test_base.CallREST(url, None) except Exception as e: # pylint: disable=broad-except - self.fail('Request failed with error: {}'.format(e)) + self.fail('Request failed with error: {}'.format(e)) # noqa: UP032 try: model_metadata_file = self._GetModelMetadataFile() @@ -576,7 +576,7 @@ def testGetModelMetadataREST(self): json.loads(resp_data.decode())), tensorflow_model_server_test_base.SortedObject(expected_metadata)) except Exception as e: # pylint: disable=broad-except - self.fail('Request failed with error: {}'.format(e)) + self.fail('Request failed with error: {}'.format(e)) # noqa: UP032 def testPrometheusEndpoint(self): """Test ModelStatus implementation over REST API with columnar inputs.""" @@ -587,14 +587,14 @@ def testPrometheusEndpoint(self): monitoring_config_file=self._GetMonitoringConfigFile())[2].split(':') # Prepare request - url = 'http://{}:{}/monitoring/prometheus/metrics'.format(host, port) + url = 'http://{}:{}/monitoring/prometheus/metrics'.format(host, port) # noqa: UP032 # Send request resp_data = None try: resp_data = tensorflow_model_server_test_base.CallREST(url, None) except Exception as e: # pylint: disable=broad-except - self.fail('Request failed with error: {}'.format(e)) + self.fail('Request failed with error: {}'.format(e)) # noqa: UP032 # Verify that there should be some metric type information. 
    self.assertIn('# TYPE',
@@ -604,7 +604,7 @@ def testPredictUDS(self):
    """Test saved model prediction over a Unix domain socket."""
    _ = TensorflowModelServerTest.RunServer('default',
                                            self._GetSavedModelBundlePath())
-    model_server_address = 'unix:%s' % GRPC_SOCKET_PATH
+    model_server_address = 'unix:%s' % GRPC_SOCKET_PATH  # noqa: UP031
    self.VerifyPredictRequest(
        model_server_address,
        expected_output=3.0,
@@ -655,9 +655,9 @@ def test_tf_saved_model_save_multiple_signatures(self):
    base_path = os.path.join(self.get_temp_dir(), 'tf_saved_model_save')
    export_path = os.path.join(base_path, '00000123')
    root = tf.train.Checkpoint()
-    root.f = tf.function(lambda x: {'y': 1.},
+    root.f = tf.function(lambda x: {'y': 1.},  # noqa: ARG005
                         input_signature=[tf.TensorSpec(None, tf.float32)])
-    root.g = tf.function(lambda x: {'y': 2.},
+    root.g = tf.function(lambda x: {'y': 2.},  # noqa: ARG005
                         input_signature=[tf.TensorSpec(None, tf.float32)])
    tf.saved_model.experimental.save(
        root, export_path,
@@ -734,7 +734,7 @@ def test_distrat_sequential_keras_saved_model_save(self):
        expected_version=expected_version)
 
  def test_profiler_service_with_valid_trace_request(self):
-    """Test integration with profiler service by sending tracing requests."""
+    """Test integration with profiler service by sending tracing requests."""  # noqa: D202
 
    # Start model server
    model_path = self._GetSavedModelBundlePath()
@@ -742,14 +742,14 @@ def test_profiler_service_with_valid_trace_request(self):
        'default', model_path)
 
    # Prepare predict request
-    url = 'http://{}/v1/models/default:predict'.format(rest_addr)
+    url = 'http://{}/v1/models/default:predict'.format(rest_addr)  # noqa: UP032
    json_req = '{"instances": [2.0, 3.0, 4.0]}'
 
    # In a subprocess, send a REST predict request every second for 3 seconds
-    exec_command = ("wget {} --content-on-error=on -O- --post-data  '{}' "
+    exec_command = ("wget {} --content-on-error=on -O- --post-data  '{}' "  # noqa: UP032
                    "--header='Content-Type:application/json'").format(
                        url, json_req)
-    repeat_command = 'for n in {{1..3}}; do {} & sleep 1; done;'.format(
+    repeat_command = 'for n in {{1..3}}; do {} & sleep 1; done;'.format(  # noqa: UP032
        exec_command)
    proc = subprocess.Popen(
        repeat_command,
@@ -770,7 +770,7 @@ def test_profiler_service_with_valid_trace_request(self):
 
    # Log stdout & stderr of subprocess issuing predict requests for debugging
    out, err = proc.communicate()
-    print("stdout: '{}' | stderr: '{}'".format(out, err))
+    print("stdout: '{}' | stderr: '{}'".format(out, err))  # noqa: UP032
 
  def test_tf_text(self):
    """Test TF Text."""
diff --git a/tensorflow_serving/model_servers/tensorflow_model_server_test_client.py b/tensorflow_serving/model_servers/tensorflow_model_server_test_client.py
index 11274737ec7..1a0a8949950 100644
--- a/tensorflow_serving/model_servers/tensorflow_model_server_test_client.py
+++ b/tensorflow_serving/model_servers/tensorflow_model_server_test_client.py
@@ -14,9 +14,9 @@
 # ==============================================================================
 """Manual test client for tensorflow_model_server."""
 
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
+from __future__ import absolute_import  # noqa: I001, UP010
+from __future__ import division  # noqa: UP010
+from __future__ import print_function  # noqa: UP010
 
 import grpc
 import tensorflow as tf
diff --git a/tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py b/tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py
index 1a0b250a12d..7df6f6d264b 100644
--- a/tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py
+++ b/tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py
@@ -15,9 +15,9 @@
 
 """Tests for tensorflow_model_server."""
 
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
+from __future__ import absolute_import  # noqa: I001, UP010
+from __future__ import division  # noqa: UP010
+from __future__ import print_function  # noqa: UP010
 
 import atexit
 import json
@@ -50,7 +50,7 @@
 def SetVirtualCpus(num_virtual_cpus):
  """Create virtual CPU devices if they haven't yet been created."""
  if num_virtual_cpus < 1:
-    raise ValueError('`num_virtual_cpus` must be at least 1 not %r' %
+    raise ValueError('`num_virtual_cpus` must be at least 1 not %r' %  # noqa: UP031
                     (num_virtual_cpus,))
  physical_devices = tf.config.experimental.list_physical_devices('CPU')
  if not physical_devices:
@@ -64,7 +64,7 @@ def SetVirtualCpus(num_virtual_cpus):
        physical_devices[0], virtual_devices)
  else:
    if len(configs) < num_virtual_cpus:
-      raise RuntimeError('Already configured with %d < %d virtual CPUs' %
+      raise RuntimeError('Already configured with %d < %d virtual CPUs' %  # noqa: UP031
                         (len(configs), num_virtual_cpus))
 
 
@@ -85,7 +85,7 @@ def WaitForServerReady(port):
    try:
      # Send empty request to missing model
-      channel = grpc.insecure_channel('localhost:{}'.format(port))
+      channel = grpc.insecure_channel('localhost:{}'.format(port))  # noqa: UP032
      stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
      stub.Predict(request, RPC_TIMEOUT)
    except grpc.RpcError as error:
@@ -99,16 +99,16 @@ def CallREST(url, req, max_attempts=60):
  """Returns HTTP response body from a REST API call."""
  for attempt in range(max_attempts):
    try:
-      print('Attempt {}: Sending request to {} with data:\n{}'.format(
+      print('Attempt {}: Sending request to {} with data:\n{}'.format(  # noqa: UP032
          attempt, url, req))
      json_data = json.dumps(req).encode('utf-8') if req is not None else None
      resp = urllib.request.urlopen(urllib.request.Request(url, data=json_data))
      resp_data = resp.read()
-      print('Received response:\n{}'.format(resp_data))
+      print('Received response:\n{}'.format(resp_data))  # noqa: UP032
      resp.close()
      return resp_data
    except Exception as e:  # pylint: disable=broad-except
-      print('Failed attempt {}. Error: {}'.format(attempt, e))
+      print('Failed attempt {}. Error: {}'.format(attempt, e))  # noqa: UP032
      if attempt == max_attempts - 1:
        raise
      print('Retrying...')
@@ -123,7 +123,7 @@ def SortedObject(obj):
    return sorted(SortedObject(x) for x in obj)
  if isinstance(obj, tuple):
    return list(sorted(SortedObject(x) for x in obj))
-  else:
+  else:  # noqa: RET505
    return obj
 
 
@@ -188,7 +188,7 @@ def RunServer(
      return TensorflowModelServerTestBase.model_servers_dict[args_key]
    port = PickUnusedPort()
    rest_api_port = PickUnusedPort()
-    print(('Starting test server on port: {} for model_name: '
+    print(('Starting test server on port: {} for model_name: '  # noqa: UP032, UP034
           '{}/model_config_file: {}'.format(port, model_name,
                                             model_config_file)))
diff --git a/tensorflow_serving/servables/tensorflow/testdata/bad_model_config.txt b/tensorflow_serving/servables/tensorflow/testdata/bad_model_config.txt
index 6649d3fd44a..887d07c4356 100644
--- a/tensorflow_serving/servables/tensorflow/testdata/bad_model_config.txt
+++ b/tensorflow_serving/servables/tensorflow/testdata/bad_model_config.txt
@@ -1 +1 @@
-improperly formatted file
\ No newline at end of file
+improperly formatted file
diff --git a/tensorflow_serving/servables/tensorflow/testdata/export_bad_half_plus_two.py b/tensorflow_serving/servables/tensorflow/testdata/export_bad_half_plus_two.py
index b5b7bde74c7..980223aa702 100644
--- a/tensorflow_serving/servables/tensorflow/testdata/export_bad_half_plus_two.py
+++ b/tensorflow_serving/servables/tensorflow/testdata/export_bad_half_plus_two.py
@@ -34,7 +34,7 @@ def Export():
    # Calculate, y = a*x + b
    # here we use a placeholder 'x' which is fed at inference time.
    x = tf.placeholder(tf.float32)
-    y = tf.add(tf.multiply(a, x), b)
+    y = tf.add(tf.multiply(a, x), b)  # noqa: F841
 
    # Export the model without signatures.
    # Note that the model is intentionally exported without using exporter,
diff --git a/tensorflow_serving/servables/tensorflow/testdata/export_counter.py b/tensorflow_serving/servables/tensorflow/testdata/export_counter.py
index c4ada219c92..22651e1f8f3 100644
--- a/tensorflow_serving/servables/tensorflow/testdata/export_counter.py
+++ b/tensorflow_serving/servables/tensorflow/testdata/export_counter.py
@@ -18,9 +18,9 @@
 reset_counter, to test Predict service.
""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +from __future__ import absolute_import # noqa: I001, UP010 +from __future__ import division # noqa: UP010 +from __future__ import print_function # noqa: UP010 import tensorflow as tf @@ -97,7 +97,7 @@ def export_model(output_dir): save_model(sess, signature_def_map, output_dir) -def main(unused_argv): +def main(unused_argv): # noqa: ARG001 export_model("/tmp/saved_model_counter/00000123") diff --git a/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.README b/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.README index d3fcb3d8d85..1b6a59f2654 100644 --- a/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.README +++ b/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.README @@ -19,4 +19,3 @@ and is updated using bazel run -c opt parse_example_tflite_with_string cp /tmp/parse_example_tflite parse_example_tflite/00000123/model.tflite ``` - diff --git a/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.py b/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.py index 17790e90b2d..6a9bd36536c 100644 --- a/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.py +++ b/tensorflow_serving/servables/tensorflow/testdata/parse_example_tflite.py @@ -30,7 +30,7 @@ To create a model: bazel run -c opt parse_example_tflite_with_string """ -import argparse +import argparse # noqa: I001 import sys import tensorflow.compat.v1 as tf @@ -87,12 +87,12 @@ def _generate_tflite_for_parse_example_with_string(export_dir): k = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY tflite_model = signature_def_utils.set_signature_defs( tflite_model, {k: predict_signature_def}) - open(export_dir + "/model.tflite", "wb").write(tflite_model) + open(export_dir + "/model.tflite", "wb").write(tflite_model) # noqa: SIM115 def main(_): _generate_tflite_for_parse_example_with_string(FLAGS.output_dir) - print("TFLite model generated at: %(dir)s" % { + print("TFLite model generated at: %(dir)s" % { # noqa: UP031 "dir": FLAGS.output_dir }) diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_three/00000123/assets/foo.txt b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_three/00000123/assets/foo.txt index f9ff0366880..56c3e54a43f 100644 --- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_three/00000123/assets/foo.txt +++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_three/00000123/assets/foo.txt @@ -1 +1 @@ -asset-file-contents \ No newline at end of file +asset-file-contents diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two.py b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two.py index fe05c062cf0..8c64a5f0123 100644 --- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two.py +++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two.py @@ -45,9 +45,9 @@ --device=gpu """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +from __future__ import absolute_import # noqa: I001, UP010 +from __future__ import division # noqa: UP010 +from __future__ import print_function # noqa: UP010 import argparse import os @@ -199,7 +199,7 @@ def get_serving_signatures(self): } 
  @tf.function(input_signature=[tf.TensorSpec(shape=[1], dtype=tf.float32)])
-  def predict(self, x=tf.constant([0], shape=[1], dtype=tf.float32)):
+  def predict(self, x=tf.constant([0], shape=[1], dtype=tf.float32)):  # noqa: B008
    return {"y": self.compute(x, self.b)}
 
  @tf.function(
@@ -386,7 +386,7 @@ def _generate_saved_model_for_half_plus_two(
    k = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
    tflite_model = signature_def_utils.set_signature_defs(
        tflite_model, {k: predict_signature_def})
-    open(export_dir + "/model.tflite", "wb").write(tflite_model)
+    open(export_dir + "/model.tflite", "wb").write(tflite_model)  # noqa: SIM115
  else:
    if use_main_op:
      builder.add_meta_graph_and_variables(
@@ -412,45 +412,45 @@ def _generate_saved_model_for_half_plus_two(
 def main(_):
  _generate_saved_model_for_half_plus_two(
      FLAGS.output_dir, device_type=FLAGS.device)
-  print("SavedModel generated for %(device)s at: %(dir)s" % {
+  print("SavedModel generated for %(device)s at: %(dir)s" % {  # noqa: UP031
      "device": FLAGS.device,
      "dir": FLAGS.output_dir
  })
 
  _generate_saved_model_for_half_plus_two(
-      "%s_%s" % (FLAGS.output_dir_tf2, FLAGS.device),
+      "%s_%s" % (FLAGS.output_dir_tf2, FLAGS.device),  # noqa: UP031
      tf2=True,
      device_type=FLAGS.device)
  print(
-      "SavedModel TF2 generated for %(device)s at: %(dir)s" % {
+      "SavedModel TF2 generated for %(device)s at: %(dir)s" % {  # noqa: UP031
          "device": FLAGS.device,
-          "dir": "%s_%s" % (FLAGS.output_dir_tf2, FLAGS.device),
+          "dir": "%s_%s" % (FLAGS.output_dir_tf2, FLAGS.device),  # noqa: UP031
      })
 
  _generate_saved_model_for_half_plus_two(
      FLAGS.output_dir_pbtxt, as_text=True, device_type=FLAGS.device)
-  print("SavedModel generated for %(device)s at: %(dir)s" % {
+  print("SavedModel generated for %(device)s at: %(dir)s" % {  # noqa: UP031
      "device": FLAGS.device,
      "dir": FLAGS.output_dir_pbtxt
  })
 
  _generate_saved_model_for_half_plus_two(
      FLAGS.output_dir_main_op, use_main_op=True, device_type=FLAGS.device)
-  print("SavedModel generated for %(device)s at: %(dir)s " % {
+  print("SavedModel generated for %(device)s at: %(dir)s " % {  # noqa: UP031
      "device": FLAGS.device,
      "dir": FLAGS.output_dir_main_op
  })
 
  _generate_saved_model_for_half_plus_two(
      FLAGS.output_dir_tflite, as_tflite=True, device_type=FLAGS.device)
-  print("SavedModel in TFLite format generated for %(device)s at: %(dir)s " % {
+  print("SavedModel in TFLite format generated for %(device)s at: %(dir)s " % {  # noqa: UP031
      "device": FLAGS.device,
      "dir": FLAGS.output_dir_tflite,
  })
 
  _generate_saved_model_for_half_plus_two(
      FLAGS.output_dir_mlmd, include_mlmd=True, device_type=FLAGS.device)
-  print("SavedModel with MLMD generated for %(device)s at: %(dir)s " % {
+  print("SavedModel with MLMD generated for %(device)s at: %(dir)s " % {  # noqa: UP031
      "device": FLAGS.device,
      "dir": FLAGS.output_dir_mlmd,
  })
@@ -458,7 +458,7 @@ def main(_):
  _generate_saved_model_for_half_plus_two(
      FLAGS.output_dir_tflite_with_sigdef, device_type=FLAGS.device,
      as_tflite_with_sigdef=True)
-  print("SavedModel in TFLite format with SignatureDef generated for "
+  print("SavedModel in TFLite format with SignatureDef generated for "  # noqa: UP031
        "%(device)s at: %(dir)s " % {
            "device": FLAGS.device,
            "dir": FLAGS.output_dir_tflite_with_sigdef,
diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000123/assets/foo.txt b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000123/assets/foo.txt
index f9ff0366880..56c3e54a43f 100644
--- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000123/assets/foo.txt
+++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000123/assets/foo.txt
@@ -1 +1 @@
-asset-file-contents
\ No newline at end of file
+asset-file-contents
diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000124/assets/foo.txt b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000124/assets/foo.txt
index f9ff0366880..56c3e54a43f 100644
--- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000124/assets/foo.txt
+++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000124/assets/foo.txt
@@ -1 +1 @@
-asset-file-contents
\ No newline at end of file
+asset-file-contents
diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_cpu/00000123/assets/foo.txt b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_cpu/00000123/assets/foo.txt
index f9ff0366880..56c3e54a43f 100644
--- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_cpu/00000123/assets/foo.txt
+++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_cpu/00000123/assets/foo.txt
@@ -1 +1 @@
-asset-file-contents
\ No newline at end of file
+asset-file-contents
diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_gpu_trt/00000123/assets/foo.txt b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_gpu_trt/00000123/assets/foo.txt
index f9ff0366880..56c3e54a43f 100644
--- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_gpu_trt/00000123/assets/foo.txt
+++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_gpu_trt/00000123/assets/foo.txt
@@ -1 +1 @@
-asset-file-contents
\ No newline at end of file
+asset-file-contents
diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mkl/00000123/assets/foo.txt b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mkl/00000123/assets/foo.txt
index f9ff0366880..56c3e54a43f 100644
--- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mkl/00000123/assets/foo.txt
+++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mkl/00000123/assets/foo.txt
@@ -1 +1 @@
-asset-file-contents
\ No newline at end of file
+asset-file-contents
diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mlmd/00000123/assets.extra/mlmd_uuid b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mlmd/00000123/assets.extra/mlmd_uuid
index afc2417480e..cffaea71d35 100644
--- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mlmd/00000123/assets.extra/mlmd_uuid
+++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mlmd/00000123/assets.extra/mlmd_uuid
@@ -1 +1 @@
-test_mlmd_uuid
\ No newline at end of file
+test_mlmd_uuid
diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mlmd/00000123/assets/foo.txt b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mlmd/00000123/assets/foo.txt
index f9ff0366880..56c3e54a43f 100644
--- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mlmd/00000123/assets/foo.txt
+++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mlmd/00000123/assets/foo.txt
@@ -1 +1 @@
-asset-file-contents
\ No newline at end of file
+asset-file-contents
diff --git a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_tf2_cpu/00000123/assets/foo.txt b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_tf2_cpu/00000123/assets/foo.txt
index f9ff0366880..56c3e54a43f 100644
--- a/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_tf2_cpu/00000123/assets/foo.txt
+++ b/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_tf2_cpu/00000123/assets/foo.txt
@@ -1 +1 @@
-asset-file-contents
\ No newline at end of file
+asset-file-contents
diff --git a/tensorflow_serving/servables/tensorflow/testdata/tf_text_regression.README b/tensorflow_serving/servables/tensorflow/testdata/tf_text_regression.README
index faf81a58f1a..038352e0c66 100644
--- a/tensorflow_serving/servables/tensorflow/testdata/tf_text_regression.README
+++ b/tensorflow_serving/servables/tensorflow/testdata/tf_text_regression.README
@@ -16,4 +16,3 @@ This model is used to test the integration with TF Text,
 and is updated using this script:
 
 https://github.com/tensorflow/text/blob/master/oss_scripts/model_server/save_models.py
-
diff --git a/tensorflow_serving/tools/docker/Dockerfile.devel-gpu b/tensorflow_serving/tools/docker/Dockerfile.devel-gpu
index d33bca9642b..862ec1cd0a6 100644
--- a/tensorflow_serving/tools/docker/Dockerfile.devel-gpu
+++ b/tensorflow_serving/tools/docker/Dockerfile.devel-gpu
@@ -187,4 +187,3 @@ FROM binary_build as clean_build
 RUN bazel clean --expunge --color=yes && \
     rm -rf /root/.cache
 CMD ["/bin/bash"]
-
diff --git a/tensorflow_serving/tools/docker/Dockerfile.gpu b/tensorflow_serving/tools/docker/Dockerfile.gpu
index 80b210d5058..fe92f8ab636 100644
--- a/tensorflow_serving/tools/docker/Dockerfile.gpu
+++ b/tensorflow_serving/tools/docker/Dockerfile.gpu
@@ -87,4 +87,3 @@ tensorflow_model_server --port=8500 --rest_api_port=8501 \
 && chmod +x /usr/bin/tf_serving_entrypoint.sh
 
 ENTRYPOINT ["/usr/bin/tf_serving_entrypoint.sh"]
-
diff --git a/tensorflow_serving/tools/pip_package/setup.py b/tensorflow_serving/tools/pip_package/setup.py
index d7b21224ae7..43fa34883e8 100644
--- a/tensorflow_serving/tools/pip_package/setup.py
+++ b/tensorflow_serving/tools/pip_package/setup.py
@@ -23,7 +23,7 @@
 
 This package contains the TensorFlow Serving Python APIs.
""" -import sys +import sys # noqa: I001 from setuptools import find_packages from setuptools import setup @@ -98,4 +98,7 @@ 'Topic :: Software Development :: Libraries', 'Topic :: Software Development :: Libraries :: Python Modules', ], + extras_require={ + 'lint': ['pre-commit'], + }, ) diff --git a/tensorflow_serving/util/net_http/client/test_client/public/httpclient.h b/tensorflow_serving/util/net_http/client/test_client/public/httpclient.h index 52b624d9c07..14cb4e51cff 100644 --- a/tensorflow_serving/util/net_http/client/test_client/public/httpclient.h +++ b/tensorflow_serving/util/net_http/client/test_client/public/httpclient.h @@ -46,4 +46,3 @@ inline std::unique_ptr CreateEvHTTPConnection( } // namespace tensorflow #endif // THIRD_PARTY_TENSORFLOW_SERVING_UTIL_NET_HTTP_CLIENT_TEST_CLIENT_PUBLIC_HTTPCLIENT_H_ - diff --git a/tools/gen_status_stamp.sh b/tools/gen_status_stamp.sh index f8c840d6d8d..32a73498caa 100755 --- a/tools/gen_status_stamp.sh +++ b/tools/gen_status_stamp.sh @@ -35,5 +35,3 @@ if [ -d .git ] || git rev-parse --git-dir > /dev/null 2>&1; then else echo "BUILD_SCM_REVISION no_git" fi; - -