From 33948caf4d35f647828c5ee553cac12df2fe9431 Mon Sep 17 00:00:00 2001 From: Nick Hristov Date: Sun, 12 Oct 2014 18:23:06 -0700 Subject: [PATCH 01/20] Reverting to 2.8.1 for local usage. --- gradle.properties | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/gradle.properties b/gradle.properties index 2a70dfe7..1354b956 100644 --- a/gradle.properties +++ b/gradle.properties @@ -1,4 +1,4 @@ -version=0.6.82 -defaultScalaVersion=2.10.3 -targetScalaVersions=2.10.3 +version=0.6.62 +defaultScalaVersion=2.8.1 +targetScalaVersions=2.8.1 crossBuild=false From 5a5590456fd72f1843612919afaa4c51c16e8a85 Mon Sep 17 00:00:00 2001 From: Nick Hristov Date: Sun, 12 Oct 2014 18:41:26 -0700 Subject: [PATCH 02/20] Class cast exception bugfix. --- .../network/ConsistentHashPartitionedLoadBalancer.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/java-network/src/main/java/com/linkedin/norbert/javacompat/network/ConsistentHashPartitionedLoadBalancer.java b/java-network/src/main/java/com/linkedin/norbert/javacompat/network/ConsistentHashPartitionedLoadBalancer.java index 939f84ed..320bbdc5 100644 --- a/java-network/src/main/java/com/linkedin/norbert/javacompat/network/ConsistentHashPartitionedLoadBalancer.java +++ b/java-network/src/main/java/com/linkedin/norbert/javacompat/network/ConsistentHashPartitionedLoadBalancer.java @@ -120,7 +120,7 @@ public Node nextNode(PartitionedId partitionedId) public Node nextNode(PartitionedId partitionedId, Long capability, Long persistentCapability) { long hash = _hashFunction.hash(partitionedId.toString()); - long partitionId = (int)(Math.abs(hash) % _rings.size()); + int partitionId = (int)(Math.abs(hash) % _rings.size()); NavigableMap ring = _rings.get(partitionId); Endpoint endpoint = searchWheel(ring, hash, new Function() { @Override From e9f0dd6353bc590ead2b4884f1fa6c24df2c410f Mon Sep 17 00:00:00 2001 From: Nick Hristov Date: Wed, 15 Oct 2014 09:35:28 -0700 Subject: [PATCH 03/20] Outer ring lookup bugfix. 
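nextNode() derives a ring index as hash % _rings.size() and previously fetched the ring with a direct _rings.get(partitionId). If the cluster's partition ids are not the contiguous range 0..N-1, that exact-key get() returns null and the wheel search that follows dereferences it. _rings is now a NavigableMap and the ring entry is resolved with a wrap-around lookup over the outer ring instead.

The body of the lookup(...) helper falls outside the hunks below; a minimal sketch of the wrap-around idiom it is assumed to follow (java.util.NavigableMap):

    private static <K, V> Map.Entry<K, V> lookup(NavigableMap<K, V> ring, K key) {
        Map.Entry<K, V> entry = ring.ceilingEntry(key); // first entry with a key >= the requested one
        return entry != null ? entry : ring.firstEntry(); // past the last key: wrap to the ring's start
    }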
--- ...ConsistentHashPartitionedLoadBalancer.java | 12 +- ...istentHashPartitionedLoadBalancerTest.java | 132 ++++++++++++++++++ 2 files changed, 139 insertions(+), 5 deletions(-) create mode 100644 java-network/src/test/java/com/linkedin/norbert/javacompat/network/ConsistentHashPartitionedLoadBalancerTest.java diff --git a/java-network/src/main/java/com/linkedin/norbert/javacompat/network/ConsistentHashPartitionedLoadBalancer.java b/java-network/src/main/java/com/linkedin/norbert/javacompat/network/ConsistentHashPartitionedLoadBalancer.java index 320bbdc5..97f1e178 100644 --- a/java-network/src/main/java/com/linkedin/norbert/javacompat/network/ConsistentHashPartitionedLoadBalancer.java +++ b/java-network/src/main/java/com/linkedin/norbert/javacompat/network/ConsistentHashPartitionedLoadBalancer.java @@ -1,6 +1,5 @@ package com.linkedin.norbert.javacompat.network; -import com.linkedin.norbert.javacompat.cluster.Node; import java.util.AbstractMap; import java.util.HashMap; import java.util.HashSet; @@ -9,16 +8,18 @@ import java.util.Set; import java.util.TreeMap; +import com.linkedin.norbert.javacompat.cluster.Node; + public class ConsistentHashPartitionedLoadBalancer implements PartitionedLoadBalancer { private final HashFunction _hashFunction; - private final Map> _rings; + private final NavigableMap> _rings; private final TreeMap>> _routingMap; public ConsistentHashPartitionedLoadBalancer(HashFunction hashFunction, - Map> rings, + NavigableMap> rings, TreeMap>> routingMap, PartitionedLoadBalancer fallThrough) { this._hashFunction = hashFunction; @@ -50,7 +51,7 @@ public static ConsistentHashPartitionedLoadBalancer> rings = new TreeMap>(); + NavigableMap> rings = new TreeMap>(); for (Map.Entry> entry : partitionNodes.entrySet()) { Integer partId = entry.getKey(); @@ -121,7 +122,8 @@ public Node nextNode(PartitionedId partitionedId, Long capability, Long persiste { long hash = _hashFunction.hash(partitionedId.toString()); int partitionId = (int)(Math.abs(hash) % _rings.size()); - NavigableMap ring = _rings.get(partitionId); + Map.Entry> ringEntry = lookup(_rings, partitionId); + NavigableMap ring = ringEntry.getValue(); Endpoint endpoint = searchWheel(ring, hash, new Function() { @Override public Boolean apply(Endpoint key) { diff --git a/java-network/src/test/java/com/linkedin/norbert/javacompat/network/ConsistentHashPartitionedLoadBalancerTest.java b/java-network/src/test/java/com/linkedin/norbert/javacompat/network/ConsistentHashPartitionedLoadBalancerTest.java new file mode 100644 index 00000000..8cc2d995 --- /dev/null +++ b/java-network/src/test/java/com/linkedin/norbert/javacompat/network/ConsistentHashPartitionedLoadBalancerTest.java @@ -0,0 +1,132 @@ +/* + * Copyright Tidemark Systems, Inc. All rights reserved. + * Tidemark Systems Confidential and Proprietary Information. Not for external distribution, use or sale. + * Tidemark Systems software is exclusively licensed according to the terms of our Software License and Services Agreement. + */ +package com.linkedin.norbert.javacompat.network; + +import java.util.HashSet; +import java.util.Set; + +import com.linkedin.norbert.javacompat.cluster.JavaNode; +import com.linkedin.norbert.javacompat.cluster.Node; +import org.junit.Assert; +import org.junit.Test; +import scala.Option; + +/** + * A unit test for the javacompat ConsistentHashPartitionedLoadBalancer. 
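+ * Exercises nextNode(...) and nodesForPartitionedId(...) against a single-node ring and against two nodes sharing one partition.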
+ */ +public class ConsistentHashPartitionedLoadBalancerTest { + + private static class TestEndpoint implements Endpoint { + + private final Node node; + private final boolean canServeRequests; + + public TestEndpoint(Node node, boolean canServeRequests) { + this.node = node; + this.canServeRequests = canServeRequests; + } + + @Override + public Node getNode() { + return node; + } + + @Override + public boolean canServeRequests() { + return canServeRequests; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + TestEndpoint that = (TestEndpoint) o; + + if (!node.equals(that.node)) return false; + + return true; + } + + @Override + public int hashCode() { + return node.hashCode(); + } + } + + + @Test + public void testSingleNode() { + + // simplest test case, make sure we can find the node to route to + + Set testEndpoints = new HashSet(); + Set partitionsNodeOne = new HashSet(); + partitionsNodeOne.add(1); + + Node nodeOne = new JavaNode(1, "localhost:9000", true, partitionsNodeOne, Option.empty(), Option.empty()); + Endpoint endpointOne = new TestEndpoint(nodeOne, true); + testEndpoints.add(endpointOne); + ConsistentHashPartitionedLoadBalancer loadBalancer = ConsistentHashPartitionedLoadBalancer.build( + 1, + new HashFunction.MD5HashFunction(), + testEndpoints, + null + ); + Set nodes = loadBalancer.nodesForPartitionedId(1); + Assert.assertNotNull(nodes); + Assert.assertEquals(1, nodes.size()); + Node node = loadBalancer.nextNode(1); + Assert.assertNotNull(node); + } + + + + @Test + public void testTwoNodes() { + // verify that both endpoints will get hit + Set testEndpoints = new HashSet(); + Set partitionsNodeOne = new HashSet(); + partitionsNodeOne.add(1); + + Node nodeOne = new JavaNode(1, "localhost:9000", true, partitionsNodeOne, Option.empty(), Option.empty()); + Endpoint endpointOne = new TestEndpoint(nodeOne, true); + + Node nodeTwo = new JavaNode(2, "localhost:9001", true, partitionsNodeOne, Option.empty(), Option.empty()); + Endpoint endpointTwo = new TestEndpoint(nodeTwo, true); + + testEndpoints.add(endpointOne); + testEndpoints.add(endpointTwo); + + ConsistentHashPartitionedLoadBalancer loadBalancer = ConsistentHashPartitionedLoadBalancer.build( + 1, + new HashFunction.MD5HashFunction(), + testEndpoints, + null + ); + + Set nodes = loadBalancer.nodesForPartitionedId(1); + Assert.assertNotNull(nodes); + Assert.assertEquals(1, nodes.size()); + + Node resultOne = loadBalancer.nextNode(1); + Node resultTwo = loadBalancer.nextNode(2); + + Assert.assertNotNull(resultOne); + Assert.assertNotNull(resultTwo); + + // this was done via trial and error, there is no shortcut here + Assert.assertEquals(nodeOne, resultOne); + Assert.assertEquals(nodeTwo, resultTwo); + } + + @Test + public void testNonOverlapOfPartitions() { + + } + + +} From 3dfb78e902394d57dba2f9aecd5a85de38c7ed15 Mon Sep 17 00:00:00 2001 From: Nick Hristov Date: Tue, 21 Oct 2014 16:09:32 -0700 Subject: [PATCH 04/20] Rolling back scala version --- gradle.properties | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gradle.properties b/gradle.properties index 1354b956..221edcf6 100644 --- a/gradle.properties +++ b/gradle.properties @@ -1,4 +1,4 @@ version=0.6.62 -defaultScalaVersion=2.8.1 -targetScalaVersions=2.8.1 +defaultScalaVersion=2.10.3 +targetScalaVersions=2.10.3 crossBuild=false From 6d5ff7a1dd666164b79303de59631327cb00ae5a Mon Sep 17 00:00:00 2001 From: Nick Hristov Date: Wed, 22 Oct 2014 05:45:28 
-0700 Subject: [PATCH 05/20] Upgraded protobuf to 2.5.0 --- .gitignore | 1 + build.gradle | 2 +- .../norbert/protos/NorbertExampleProtos.java | 539 +++-- .../norbert/protos/NorbertProtos.java | 1839 ++++++++++++----- .../com/google/protobuf/ByteStringUtils.java | 14 + .../norbert/network/util/ProtoUtils.scala | 23 +- 6 files changed, 1619 insertions(+), 799 deletions(-) create mode 100644 network/src/main/java/com/google/protobuf/ByteStringUtils.java diff --git a/.gitignore b/.gitignore index 7db1cad2..cbf107f3 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,7 @@ project/boot/ project/plugins/project out/ build/ +.idea .gradle *.iml *.ipr diff --git a/build.gradle b/build.gradle index e2c395b3..d043557d 100644 --- a/build.gradle +++ b/build.gradle @@ -30,7 +30,7 @@ subprojects { ext.externalDependency = [ 'zookeeper':'org.apache.zookeeper:zookeeper:3.3.4', - 'protobuf':'com.google.protobuf:protobuf-java:2.4.0a', + 'protobuf':'com.google.protobuf:protobuf-java:2.5.0', 'log4j':'log4j:log4j:1.2.17', 'netty':'io.netty:netty:3.7.0.Final', 'slf4jApi':'org.slf4j:slf4j-api:1.7.5', diff --git a/cluster/src/main/java/com/linkedin/norbert/protos/NorbertExampleProtos.java b/cluster/src/main/java/com/linkedin/norbert/protos/NorbertExampleProtos.java index 79603582..db503891 100644 --- a/cluster/src/main/java/com/linkedin/norbert/protos/NorbertExampleProtos.java +++ b/cluster/src/main/java/com/linkedin/norbert/protos/NorbertExampleProtos.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: norbert_example.proto +// source: protobuf/norbert_example.proto package com.linkedin.norbert.protos; @@ -10,50 +10,129 @@ public static void registerAllExtensions( } public interface PingOrBuilder extends com.google.protobuf.MessageOrBuilder { - + // required int64 timestamp = 1; + /** + * required int64 timestamp = 1; + */ boolean hasTimestamp(); + /** + * required int64 timestamp = 1; + */ long getTimestamp(); } + /** + * Protobuf type {@code norbert.example.Ping} + */ public static final class Ping extends com.google.protobuf.GeneratedMessage implements PingOrBuilder { // Use Ping.newBuilder() to construct. 
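+ // 2.5.0-generated code builds the message through a parsing constructor driven by the static PARSER, rather than the 2.4-era Builder.buildParsed() path.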
- private Ping(Builder builder) { + private Ping(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private Ping(boolean noInit) {} - + private Ping(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private static final Ping defaultInstance; public static Ping getDefaultInstance() { return defaultInstance; } - + public Ping getDefaultInstanceForType() { return defaultInstance; } - + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Ping( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + timestamp_ = input.readInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return com.linkedin.norbert.protos.NorbertExampleProtos.internal_static_norbert_example_Ping_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return com.linkedin.norbert.protos.NorbertExampleProtos.internal_static_norbert_example_Ping_fieldAccessorTable; + return com.linkedin.norbert.protos.NorbertExampleProtos.internal_static_norbert_example_Ping_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.linkedin.norbert.protos.NorbertExampleProtos.Ping.class, com.linkedin.norbert.protos.NorbertExampleProtos.Ping.Builder.class); } - + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Ping parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Ping(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; // required int64 timestamp = 1; public static final int TIMESTAMP_FIELD_NUMBER = 1; private long timestamp_; + /** + * required int64 timestamp = 1; + */ public boolean hasTimestamp() { return ((bitField0_ & 0x00000001) == 0x00000001); } + /** + * required int64 timestamp = 1; + */ public long getTimestamp() { return timestamp_; } - + private void initFields() { timestamp_ = 0L; } @@ -61,7 +140,7 @@ private void initFields() { public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - + if (!hasTimestamp()) { memoizedIsInitialized = 0; 
return false; @@ -69,7 +148,7 @@ public final boolean isInitialized() { memoizedIsInitialized = 1; return true; } - + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); @@ -78,12 +157,12 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) } getUnknownFields().writeTo(output); } - + private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; - + size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream @@ -93,92 +172,83 @@ public int getSerializedSize() { memoizedSerializedSize = size; return size; } - + + private static final long serialVersionUID = 0L; @java.lang.Override - protected Object writeReplace() throws java.io.ObjectStreamException { + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { return super.writeReplace(); } - + public static com.linkedin.norbert.protos.NorbertExampleProtos.Ping parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); + return PARSER.parseFrom(data); } public static com.linkedin.norbert.protos.NorbertExampleProtos.Ping parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); + return PARSER.parseFrom(data, extensionRegistry); } public static com.linkedin.norbert.protos.NorbertExampleProtos.Ping parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); + return PARSER.parseFrom(data); } public static com.linkedin.norbert.protos.NorbertExampleProtos.Ping parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); + return PARSER.parseFrom(data, extensionRegistry); } public static com.linkedin.norbert.protos.NorbertExampleProtos.Ping parseFrom(java.io.InputStream input) throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); + return PARSER.parseFrom(input); } public static com.linkedin.norbert.protos.NorbertExampleProtos.Ping parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); + return PARSER.parseFrom(input, extensionRegistry); } public static com.linkedin.norbert.protos.NorbertExampleProtos.Ping parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } + return PARSER.parseDelimitedFrom(input); } public static com.linkedin.norbert.protos.NorbertExampleProtos.Ping parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static 
com.linkedin.norbert.protos.NorbertExampleProtos.Ping parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); + return PARSER.parseFrom(input); } public static com.linkedin.norbert.protos.NorbertExampleProtos.Ping parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); + return PARSER.parseFrom(input, extensionRegistry); } - + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(com.linkedin.norbert.protos.NorbertExampleProtos.Ping prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } - + @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } + /** + * Protobuf type {@code norbert.example.Ping} + */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements com.linkedin.norbert.protos.NorbertExampleProtos.PingOrBuilder { @@ -186,18 +256,21 @@ public static final class Builder extends getDescriptor() { return com.linkedin.norbert.protos.NorbertExampleProtos.internal_static_norbert_example_Ping_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return com.linkedin.norbert.protos.NorbertExampleProtos.internal_static_norbert_example_Ping_fieldAccessorTable; + return com.linkedin.norbert.protos.NorbertExampleProtos.internal_static_norbert_example_Ping_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.linkedin.norbert.protos.NorbertExampleProtos.Ping.class, com.linkedin.norbert.protos.NorbertExampleProtos.Ping.Builder.class); } - + // Construct using com.linkedin.norbert.protos.NorbertExampleProtos.Ping.newBuilder() private Builder() { maybeForceBuilderInitialization(); } - - private Builder(BuilderParent parent) { + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -208,27 +281,27 @@ private void maybeForceBuilderInitialization() { private static Builder create() { return new Builder(); } - + public Builder clear() { super.clear(); timestamp_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); return this; } - + public Builder clone() { return create().mergeFrom(buildPartial()); } - + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return com.linkedin.norbert.protos.NorbertExampleProtos.Ping.getDescriptor(); + return com.linkedin.norbert.protos.NorbertExampleProtos.internal_static_norbert_example_Ping_descriptor; } - + public com.linkedin.norbert.protos.NorbertExampleProtos.Ping getDefaultInstanceForType() { return com.linkedin.norbert.protos.NorbertExampleProtos.Ping.getDefaultInstance(); } - + public com.linkedin.norbert.protos.NorbertExampleProtos.Ping build() { com.linkedin.norbert.protos.NorbertExampleProtos.Ping result = buildPartial(); if (!result.isInitialized()) { @@ -236,17 +309,7 @@ public com.linkedin.norbert.protos.NorbertExampleProtos.Ping build() { } return result; } - - private com.linkedin.norbert.protos.NorbertExampleProtos.Ping buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - 
com.linkedin.norbert.protos.NorbertExampleProtos.Ping result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - + public com.linkedin.norbert.protos.NorbertExampleProtos.Ping buildPartial() { com.linkedin.norbert.protos.NorbertExampleProtos.Ping result = new com.linkedin.norbert.protos.NorbertExampleProtos.Ping(this); int from_bitField0_ = bitField0_; @@ -259,7 +322,7 @@ public com.linkedin.norbert.protos.NorbertExampleProtos.Ping buildPartial() { onBuilt(); return result; } - + public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof com.linkedin.norbert.protos.NorbertExampleProtos.Ping) { return mergeFrom((com.linkedin.norbert.protos.NorbertExampleProtos.Ping)other); @@ -268,7 +331,7 @@ public Builder mergeFrom(com.google.protobuf.Message other) { return this; } } - + public Builder mergeFrom(com.linkedin.norbert.protos.NorbertExampleProtos.Ping other) { if (other == com.linkedin.norbert.protos.NorbertExampleProtos.Ping.getDefaultInstance()) return this; if (other.hasTimestamp()) { @@ -277,7 +340,7 @@ public Builder mergeFrom(com.linkedin.norbert.protos.NorbertExampleProtos.Ping o this.mergeUnknownFields(other.getUnknownFields()); return this; } - + public final boolean isInitialized() { if (!hasTimestamp()) { @@ -285,119 +348,195 @@ public final boolean isInitialized() { } return true; } - + public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - timestamp_ = input.readInt64(); - break; - } + com.linkedin.norbert.protos.NorbertExampleProtos.Ping parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.linkedin.norbert.protos.NorbertExampleProtos.Ping) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); } } + return this; } - private int bitField0_; - + // required int64 timestamp = 1; private long timestamp_ ; + /** + * required int64 timestamp = 1; + */ public boolean hasTimestamp() { return ((bitField0_ & 0x00000001) == 0x00000001); } + /** + * required int64 timestamp = 1; + */ public long getTimestamp() { return timestamp_; } + /** + * required int64 timestamp = 1; + */ public Builder setTimestamp(long value) { bitField0_ |= 0x00000001; timestamp_ = value; onChanged(); return this; } + /** + * required int64 timestamp = 1; + */ public Builder clearTimestamp() { bitField0_ = (bitField0_ & ~0x00000001); timestamp_ = 0L; onChanged(); return this; } - + // @@protoc_insertion_point(builder_scope:norbert.example.Ping) } - + static { defaultInstance = new Ping(true); defaultInstance.initFields(); } - + // @@protoc_insertion_point(class_scope:norbert.example.Ping) } - + public interface PingResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { - 
+ // required int64 timestamp = 1; + /** + * required int64 timestamp = 1; + */ boolean hasTimestamp(); + /** + * required int64 timestamp = 1; + */ long getTimestamp(); } + /** + * Protobuf type {@code norbert.example.PingResponse} + */ public static final class PingResponse extends com.google.protobuf.GeneratedMessage implements PingResponseOrBuilder { // Use PingResponse.newBuilder() to construct. - private PingResponse(Builder builder) { + private PingResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private PingResponse(boolean noInit) {} - + private PingResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private static final PingResponse defaultInstance; public static PingResponse getDefaultInstance() { return defaultInstance; } - + public PingResponse getDefaultInstanceForType() { return defaultInstance; } - + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private PingResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + timestamp_ = input.readInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return com.linkedin.norbert.protos.NorbertExampleProtos.internal_static_norbert_example_PingResponse_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return com.linkedin.norbert.protos.NorbertExampleProtos.internal_static_norbert_example_PingResponse_fieldAccessorTable; + return com.linkedin.norbert.protos.NorbertExampleProtos.internal_static_norbert_example_PingResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse.class, com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse.Builder.class); } - + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public PingResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new PingResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; // required int64 timestamp = 1; public static final int TIMESTAMP_FIELD_NUMBER = 1; private long 
timestamp_; + /** + * required int64 timestamp = 1; + */ public boolean hasTimestamp() { return ((bitField0_ & 0x00000001) == 0x00000001); } + /** + * required int64 timestamp = 1; + */ public long getTimestamp() { return timestamp_; } - + private void initFields() { timestamp_ = 0L; } @@ -405,7 +544,7 @@ private void initFields() { public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - + if (!hasTimestamp()) { memoizedIsInitialized = 0; return false; @@ -413,7 +552,7 @@ public final boolean isInitialized() { memoizedIsInitialized = 1; return true; } - + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); @@ -422,12 +561,12 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) } getUnknownFields().writeTo(output); } - + private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; - + size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream @@ -437,92 +576,83 @@ public int getSerializedSize() { memoizedSerializedSize = size; return size; } - + + private static final long serialVersionUID = 0L; @java.lang.Override - protected Object writeReplace() throws java.io.ObjectStreamException { + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { return super.writeReplace(); } - + public static com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); + return PARSER.parseFrom(data); } public static com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); + return PARSER.parseFrom(data, extensionRegistry); } public static com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); + return PARSER.parseFrom(data); } public static com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); + return PARSER.parseFrom(data, extensionRegistry); } public static com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse parseFrom(java.io.InputStream input) throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); + return PARSER.parseFrom(input); } public static com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); + return PARSER.parseFrom(input, extensionRegistry); } public static com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return 
builder.buildParsed(); - } else { - return null; - } + return PARSER.parseDelimitedFrom(input); } public static com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); + return PARSER.parseFrom(input); } public static com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); + return PARSER.parseFrom(input, extensionRegistry); } - + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } - + @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } + /** + * Protobuf type {@code norbert.example.PingResponse} + */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements com.linkedin.norbert.protos.NorbertExampleProtos.PingResponseOrBuilder { @@ -530,18 +660,21 @@ public static final class Builder extends getDescriptor() { return com.linkedin.norbert.protos.NorbertExampleProtos.internal_static_norbert_example_PingResponse_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return com.linkedin.norbert.protos.NorbertExampleProtos.internal_static_norbert_example_PingResponse_fieldAccessorTable; + return com.linkedin.norbert.protos.NorbertExampleProtos.internal_static_norbert_example_PingResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse.class, com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse.Builder.class); } - + // Construct using com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } - - private Builder(BuilderParent parent) { + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -552,27 +685,27 @@ private void maybeForceBuilderInitialization() { private static Builder create() { return new Builder(); } - + public Builder clear() { super.clear(); timestamp_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); return this; } - + public Builder clone() { return create().mergeFrom(buildPartial()); } - + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse.getDescriptor(); + return 
com.linkedin.norbert.protos.NorbertExampleProtos.internal_static_norbert_example_PingResponse_descriptor; } - + public com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse getDefaultInstanceForType() { return com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse.getDefaultInstance(); } - + public com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse build() { com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse result = buildPartial(); if (!result.isInitialized()) { @@ -580,17 +713,7 @@ public com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse build() { } return result; } - - private com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - + public com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse buildPartial() { com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse result = new com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse(this); int from_bitField0_ = bitField0_; @@ -603,7 +726,7 @@ public com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse buildPartia onBuilt(); return result; } - + public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse) { return mergeFrom((com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse)other); @@ -612,7 +735,7 @@ public Builder mergeFrom(com.google.protobuf.Message other) { return this; } } - + public Builder mergeFrom(com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse other) { if (other == com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse.getDefaultInstance()) return this; if (other.hasTimestamp()) { @@ -621,7 +744,7 @@ public Builder mergeFrom(com.linkedin.norbert.protos.NorbertExampleProtos.PingRe this.mergeUnknownFields(other.getUnknownFields()); return this; } - + public final boolean isInitialized() { if (!hasTimestamp()) { @@ -629,73 +752,70 @@ public final boolean isInitialized() { } return true; } - + public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - timestamp_ = input.readInt64(); - break; - } + com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); } } + return this; } - private int bitField0_; - + // required int64 timestamp = 1; private long timestamp_ ; + 
/** + * required int64 timestamp = 1; + */ public boolean hasTimestamp() { return ((bitField0_ & 0x00000001) == 0x00000001); } + /** + * required int64 timestamp = 1; + */ public long getTimestamp() { return timestamp_; } + /** + * required int64 timestamp = 1; + */ public Builder setTimestamp(long value) { bitField0_ |= 0x00000001; timestamp_ = value; onChanged(); return this; } + /** + * required int64 timestamp = 1; + */ public Builder clearTimestamp() { bitField0_ = (bitField0_ & ~0x00000001); timestamp_ = 0L; onChanged(); return this; } - + // @@protoc_insertion_point(builder_scope:norbert.example.PingResponse) } - + static { defaultInstance = new PingResponse(true); defaultInstance.initFields(); } - + // @@protoc_insertion_point(class_scope:norbert.example.PingResponse) } - + private static com.google.protobuf.Descriptors.Descriptor internal_static_norbert_example_Ping_descriptor; private static @@ -706,7 +826,7 @@ public Builder clearTimestamp() { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_norbert_example_PingResponse_fieldAccessorTable; - + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; @@ -715,10 +835,11 @@ public Builder clearTimestamp() { descriptor; static { java.lang.String[] descriptorData = { - "\n\025norbert_example.proto\022\017norbert.example" + - "\"\031\n\004Ping\022\021\n\ttimestamp\030\001 \002(\003\"!\n\014PingRespo" + - "nse\022\021\n\ttimestamp\030\001 \002(\003B5\n\033com.linkedin.n" + - "orbert.protosB\024NorbertExampleProtosH\001" + "\n\036protobuf/norbert_example.proto\022\017norber" + + "t.example\"\031\n\004Ping\022\021\n\ttimestamp\030\001 \002(\003\"!\n\014" + + "PingResponse\022\021\n\ttimestamp\030\001 \002(\003B5\n\033com.l" + + "inkedin.norbert.protosB\024NorbertExamplePr" + + "otosH\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -730,17 +851,13 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( internal_static_norbert_example_Ping_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_norbert_example_Ping_descriptor, - new java.lang.String[] { "Timestamp", }, - com.linkedin.norbert.protos.NorbertExampleProtos.Ping.class, - com.linkedin.norbert.protos.NorbertExampleProtos.Ping.Builder.class); + new java.lang.String[] { "Timestamp", }); internal_static_norbert_example_PingResponse_descriptor = getDescriptor().getMessageTypes().get(1); internal_static_norbert_example_PingResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_norbert_example_PingResponse_descriptor, - new java.lang.String[] { "Timestamp", }, - com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse.class, - com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse.Builder.class); + new java.lang.String[] { "Timestamp", }); return null; } }; @@ -749,6 +866,6 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( new com.google.protobuf.Descriptors.FileDescriptor[] { }, assigner); } - + // @@protoc_insertion_point(outer_class_scope) } diff --git a/cluster/src/main/java/com/linkedin/norbert/protos/NorbertProtos.java b/cluster/src/main/java/com/linkedin/norbert/protos/NorbertProtos.java index e3500a80..6f204cff 100644 --- a/cluster/src/main/java/com/linkedin/norbert/protos/NorbertProtos.java +++ 
b/cluster/src/main/java/com/linkedin/norbert/protos/NorbertProtos.java @@ -1,5 +1,5 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! -// source: norbert.proto +// source: protobuf/norbert.proto package com.linkedin.norbert.protos; @@ -10,83 +10,274 @@ public static void registerAllExtensions( } public interface NorbertMessageOrBuilder extends com.google.protobuf.MessageOrBuilder { - + // required sfixed64 request_id_msb = 1; + /** + * required sfixed64 request_id_msb = 1; + */ boolean hasRequestIdMsb(); + /** + * required sfixed64 request_id_msb = 1; + */ long getRequestIdMsb(); - + // required sfixed64 request_id_lsb = 2; + /** + * required sfixed64 request_id_lsb = 2; + */ boolean hasRequestIdLsb(); + /** + * required sfixed64 request_id_lsb = 2; + */ long getRequestIdLsb(); - + // optional .norbert.NorbertMessage.Status status = 10 [default = OK]; + /** + * optional .norbert.NorbertMessage.Status status = 10 [default = OK]; + */ boolean hasStatus(); + /** + * optional .norbert.NorbertMessage.Status status = 10 [default = OK]; + */ com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Status getStatus(); - + // required string message_name = 11; + /** + * required string message_name = 11; + */ boolean hasMessageName(); - String getMessageName(); - + /** + * required string message_name = 11; + */ + java.lang.String getMessageName(); + /** + * required string message_name = 11; + */ + com.google.protobuf.ByteString + getMessageNameBytes(); + // optional bytes message = 12; + /** + * optional bytes message = 12; + */ boolean hasMessage(); + /** + * optional bytes message = 12; + */ com.google.protobuf.ByteString getMessage(); - + // optional string error_message = 13; + /** + * optional string error_message = 13; + */ boolean hasErrorMessage(); - String getErrorMessage(); - + /** + * optional string error_message = 13; + */ + java.lang.String getErrorMessage(); + /** + * optional string error_message = 13; + */ + com.google.protobuf.ByteString + getErrorMessageBytes(); + // repeated .norbert.NorbertMessage.Header header = 14; + /** + * repeated .norbert.NorbertMessage.Header header = 14; + */ java.util.List getHeaderList(); + /** + * repeated .norbert.NorbertMessage.Header header = 14; + */ com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header getHeader(int index); + /** + * repeated .norbert.NorbertMessage.Header header = 14; + */ int getHeaderCount(); + /** + * repeated .norbert.NorbertMessage.Header header = 14; + */ java.util.List getHeaderOrBuilderList(); + /** + * repeated .norbert.NorbertMessage.Header header = 14; + */ com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.HeaderOrBuilder getHeaderOrBuilder( int index); } + /** + * Protobuf type {@code norbert.NorbertMessage} + */ public static final class NorbertMessage extends com.google.protobuf.GeneratedMessage implements NorbertMessageOrBuilder { // Use NorbertMessage.newBuilder() to construct. 
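+ // A NorbertMessage frames one request or response: a 128-bit request id (msb/lsb), a status, the message name, an optional payload and error message, and repeated key/value headers.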
- private NorbertMessage(Builder builder) { + private NorbertMessage(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private NorbertMessage(boolean noInit) {} - + private NorbertMessage(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private static final NorbertMessage defaultInstance; public static NorbertMessage getDefaultInstance() { return defaultInstance; } - + public NorbertMessage getDefaultInstanceForType() { return defaultInstance; } - + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private NorbertMessage( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 9: { + bitField0_ |= 0x00000001; + requestIdMsb_ = input.readSFixed64(); + break; + } + case 17: { + bitField0_ |= 0x00000002; + requestIdLsb_ = input.readSFixed64(); + break; + } + case 80: { + int rawValue = input.readEnum(); + com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Status value = com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Status.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(10, rawValue); + } else { + bitField0_ |= 0x00000004; + status_ = value; + } + break; + } + case 90: { + bitField0_ |= 0x00000008; + messageName_ = input.readBytes(); + break; + } + case 98: { + bitField0_ |= 0x00000010; + message_ = input.readBytes(); + break; + } + case 106: { + bitField0_ |= 0x00000020; + errorMessage_ = input.readBytes(); + break; + } + case 114: { + if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + header_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000040; + } + header_.add(input.readMessage(com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + header_ = java.util.Collections.unmodifiableList(header_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_NorbertMessage_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_NorbertMessage_fieldAccessorTable; + return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_NorbertMessage_fieldAccessorTable + .ensureFieldAccessorsInitialized( + 
com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.class, com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public NorbertMessage parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new NorbertMessage(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; } - + + /** + * Protobuf enum {@code norbert.NorbertMessage.Status} + */ public enum Status implements com.google.protobuf.ProtocolMessageEnum { + /** + * OK = 0; + */ OK(0, 0), + /** + * ERROR = 1; + */ ERROR(1, 1), + /** + * HEAVYLOAD = 2; + */ HEAVYLOAD(2, 2), ; - + + /** + * OK = 0; + */ public static final int OK_VALUE = 0; + /** + * ERROR = 1; + */ public static final int ERROR_VALUE = 1; + /** + * HEAVYLOAD = 2; + */ public static final int HEAVYLOAD_VALUE = 2; - - + + public final int getNumber() { return value; } - + public static Status valueOf(int value) { switch (value) { case 0: return OK; @@ -95,7 +286,7 @@ public static Status valueOf(int value) { default: return null; } } - + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; @@ -107,7 +298,7 @@ public Status findValueByNumber(int number) { return Status.valueOf(number); } }; - + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { return getDescriptor().getValues().get(index); @@ -120,11 +311,9 @@ public Status findValueByNumber(int number) { getDescriptor() { return com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.getDescriptor().getEnumTypes().get(0); } - - private static final Status[] VALUES = { - OK, ERROR, HEAVYLOAD, - }; - + + private static final Status[] VALUES = values(); + public static Status valueOf( com.google.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { @@ -133,122 +322,238 @@ public static Status valueOf( } return VALUES[desc.getIndex()]; } - + private final int index; private final int value; - + private Status(int index, int value) { this.index = index; this.value = value; } - + // @@protoc_insertion_point(enum_scope:norbert.NorbertMessage.Status) } - + public interface HeaderOrBuilder extends com.google.protobuf.MessageOrBuilder { - + // required string key = 1; + /** + * required string key = 1; + */ boolean hasKey(); - String getKey(); - + /** + * required string key = 1; + */ + java.lang.String getKey(); + /** + * required string key = 1; + */ + com.google.protobuf.ByteString + getKeyBytes(); + // optional string value = 2; + /** + * optional string value = 2; + */ boolean hasValue(); - String getValue(); - } + /** + * optional string value = 2; + */ + java.lang.String getValue(); + /** + * optional string value = 2; + */ + com.google.protobuf.ByteString + getValueBytes(); + } + /** + * Protobuf type {@code norbert.NorbertMessage.Header} + */ public static final class Header extends com.google.protobuf.GeneratedMessage implements HeaderOrBuilder { // Use Header.newBuilder() to construct. 
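+ // Header is a key/value string pair; a NorbertMessage carries zero or more of them.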
- private Header(Builder builder) { + private Header(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private Header(boolean noInit) {} - + private Header(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private static final Header defaultInstance; public static Header getDefaultInstance() { return defaultInstance; } - + public Header getDefaultInstanceForType() { return defaultInstance; } - + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Header( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + key_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + value_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_NorbertMessage_Header_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_NorbertMessage_Header_fieldAccessorTable; + return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_NorbertMessage_Header_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.class, com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.Builder.class); + } + + public static com.google.protobuf.Parser
<Header> PARSER = + new com.google.protobuf.AbstractParser<Header>() { + public Header parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Header(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser<Header>
getParserForType() { + return PARSER; } - + private int bitField0_; // required string key = 1; public static final int KEY_FIELD_NUMBER = 1; private java.lang.Object key_; + /** + * required string key = 1; + */ public boolean hasKey() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public String getKey() { + /** + * required string key = 1; + */ + public java.lang.String getKey() { java.lang.Object ref = key_; - if (ref instanceof String) { - return (String) ref; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { key_ = s; } return s; } } - private com.google.protobuf.ByteString getKeyBytes() { + /** + * required string key = 1; + */ + public com.google.protobuf.ByteString + getKeyBytes() { java.lang.Object ref = key_; - if (ref instanceof String) { + if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); key_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } - + // optional string value = 2; public static final int VALUE_FIELD_NUMBER = 2; private java.lang.Object value_; + /** + * optional string value = 2; + */ public boolean hasValue() { return ((bitField0_ & 0x00000002) == 0x00000002); } - public String getValue() { + /** + * optional string value = 2; + */ + public java.lang.String getValue() { java.lang.Object ref = value_; - if (ref instanceof String) { - return (String) ref; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { value_ = s; } return s; } } - private com.google.protobuf.ByteString getValueBytes() { + /** + * optional string value = 2; + */ + public com.google.protobuf.ByteString + getValueBytes() { java.lang.Object ref = value_; - if (ref instanceof String) { + if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); value_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } - + private void initFields() { key_ = ""; value_ = ""; @@ -257,7 +562,7 @@ private void initFields() { public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - + if (!hasKey()) { memoizedIsInitialized = 0; return false; @@ -265,7 +570,7 @@ public final boolean isInitialized() { memoizedIsInitialized = 1; return true; } - + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); @@ -277,12 +582,12 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) } getUnknownFields().writeTo(output); } - + private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; - + size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream @@ -296,94 +601,83 @@ public int 
getSerializedSize() { memoizedSerializedSize = size; return size; } - + private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } - + public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); + return PARSER.parseFrom(data); } public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); + return PARSER.parseFrom(data, extensionRegistry); } public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); + return PARSER.parseFrom(data); } public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); + return PARSER.parseFrom(data, extensionRegistry); } public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header parseFrom(java.io.InputStream input) throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); + return PARSER.parseFrom(input); } public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); + return PARSER.parseFrom(input, extensionRegistry); } public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } + return PARSER.parseDelimitedFrom(input); } public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); + return PARSER.parseFrom(input); } public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); + return PARSER.parseFrom(input, extensionRegistry); } - + public static Builder newBuilder() { return Builder.create(); } public Builder 
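/*
 * Every static parseFrom()/parseDelimitedFrom() overload above now delegates
 * to PARSER instead of newBuilder().mergeFrom(...).buildParsed(), so the
 * Builder below loses its buildParsed() helper. parseDelimitedFrom() should
 * still yield null on a clean end-of-stream, now inside the parser rather
 * than via the mergeDelimitedFrom() false branch.
 */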
newBuilderForType() { return newBuilder(); } public static Builder newBuilder(com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } - + @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } + /** + * Protobuf type {@code norbert.NorbertMessage.Header} + */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.HeaderOrBuilder { @@ -391,18 +685,21 @@ public static final class Builder extends getDescriptor() { return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_NorbertMessage_Header_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_NorbertMessage_Header_fieldAccessorTable; + return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_NorbertMessage_Header_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.class, com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.Builder.class); } - + // Construct using com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.newBuilder() private Builder() { maybeForceBuilderInitialization(); } - - private Builder(BuilderParent parent) { + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -413,7 +710,7 @@ private void maybeForceBuilderInitialization() { private static Builder create() { return new Builder(); } - + public Builder clear() { super.clear(); key_ = ""; @@ -422,20 +719,20 @@ public Builder clear() { bitField0_ = (bitField0_ & ~0x00000002); return this; } - + public Builder clone() { return create().mergeFrom(buildPartial()); } - + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.getDescriptor(); + return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_NorbertMessage_Header_descriptor; } - + public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header getDefaultInstanceForType() { return com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.getDefaultInstance(); } - + public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header build() { com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header result = buildPartial(); if (!result.isInitialized()) { @@ -443,17 +740,7 @@ public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header build() { } return result; } - - private com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - + public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header buildPartial() { com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header result = new com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header(this); int from_bitField0_ = 
bitField0_; @@ -470,7 +757,7 @@ public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header buildPart onBuilt(); return result; } - + public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header) { return mergeFrom((com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header)other); @@ -479,19 +766,23 @@ public Builder mergeFrom(com.google.protobuf.Message other) { return this; } } - + public Builder mergeFrom(com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header other) { if (other == com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.getDefaultInstance()) return this; if (other.hasKey()) { - setKey(other.getKey()); + bitField0_ |= 0x00000001; + key_ = other.key_; + onChanged(); } if (other.hasValue()) { - setValue(other.getValue()); + bitField0_ |= 0x00000002; + value_ = other.value_; + onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } - + public final boolean isInitialized() { if (!hasKey()) { @@ -499,62 +790,69 @@ public final boolean isInitialized() { } return true; } - + public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - key_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - value_ = input.readBytes(); - break; - } + com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); } } + return this; } - private int bitField0_; - + // required string key = 1; private java.lang.Object key_ = ""; + /** + * required string key = 1; + */ public boolean hasKey() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public String getKey() { + /** + * required string key = 1; + */ + public java.lang.String getKey() { java.lang.Object ref = key_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); key_ = s; return s; } else { - return (String) ref; + return (java.lang.String) ref; } } - public Builder setKey(String value) { + /** + * required string key = 1; + */ + public com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string key = 1; + */ + public Builder setKey( + java.lang.String value) { if 
(value == null) { throw new NullPointerException(); } @@ -563,34 +861,72 @@ public Builder setKey(String value) { onChanged(); return this; } + /** + * required string key = 1; + */ public Builder clearKey() { bitField0_ = (bitField0_ & ~0x00000001); key_ = getDefaultInstance().getKey(); onChanged(); return this; } - void setKey(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; + /** + * required string key = 1; + */ + public Builder setKeyBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; key_ = value; onChanged(); + return this; } - + // optional string value = 2; private java.lang.Object value_ = ""; + /** + * optional string value = 2; + */ public boolean hasValue() { return ((bitField0_ & 0x00000002) == 0x00000002); } - public String getValue() { + /** + * optional string value = 2; + */ + public java.lang.String getValue() { java.lang.Object ref = value_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); value_ = s; return s; } else { - return (String) ref; + return (java.lang.String) ref; } } - public Builder setValue(String value) { + /** + * optional string value = 2; + */ + public com.google.protobuf.ByteString + getValueBytes() { + java.lang.Object ref = value_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + value_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string value = 2; + */ + public Builder setValue( + java.lang.String value) { if (value == null) { throw new NullPointerException(); } @@ -599,155 +935,227 @@ public Builder setValue(String value) { onChanged(); return this; } + /** + * optional string value = 2; + */ public Builder clearValue() { bitField0_ = (bitField0_ & ~0x00000002); value_ = getDefaultInstance().getValue(); onChanged(); return this; } - void setValue(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000002; + /** + * optional string value = 2; + */ + public Builder setValueBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; value_ = value; onChanged(); + return this; } - + // @@protoc_insertion_point(builder_scope:norbert.NorbertMessage.Header) } - + static { defaultInstance = new Header(true); defaultInstance.initFields(); } - + // @@protoc_insertion_point(class_scope:norbert.NorbertMessage.Header) } - + private int bitField0_; // required sfixed64 request_id_msb = 1; public static final int REQUEST_ID_MSB_FIELD_NUMBER = 1; private long requestIdMsb_; + /** + * required sfixed64 request_id_msb = 1; + */ public boolean hasRequestIdMsb() { return ((bitField0_ & 0x00000001) == 0x00000001); } + /** + * required sfixed64 request_id_msb = 1; + */ public long getRequestIdMsb() { return requestIdMsb_; } - + // required sfixed64 request_id_lsb = 2; public static final int REQUEST_ID_LSB_FIELD_NUMBER = 2; private long requestIdLsb_; + /** + * required sfixed64 request_id_lsb = 2; + */ public boolean hasRequestIdLsb() { return ((bitField0_ & 0x00000002) == 0x00000002); } + /** + * required sfixed64 request_id_lsb = 2; + */ public long getRequestIdLsb() { return requestIdLsb_; } - + // optional .norbert.NorbertMessage.Status status = 10 
[default = OK]; public static final int STATUS_FIELD_NUMBER = 10; private com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Status status_; + /** + * optional .norbert.NorbertMessage.Status status = 10 [default = OK]; + */ public boolean hasStatus() { return ((bitField0_ & 0x00000004) == 0x00000004); } + /** + * optional .norbert.NorbertMessage.Status status = 10 [default = OK]; + */ public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Status getStatus() { return status_; } - + // required string message_name = 11; public static final int MESSAGE_NAME_FIELD_NUMBER = 11; private java.lang.Object messageName_; + /** + * required string message_name = 11; + */ public boolean hasMessageName() { return ((bitField0_ & 0x00000008) == 0x00000008); } - public String getMessageName() { + /** + * required string message_name = 11; + */ + public java.lang.String getMessageName() { java.lang.Object ref = messageName_; - if (ref instanceof String) { - return (String) ref; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { messageName_ = s; } return s; } } - private com.google.protobuf.ByteString getMessageNameBytes() { + /** + * required string message_name = 11; + */ + public com.google.protobuf.ByteString + getMessageNameBytes() { java.lang.Object ref = messageName_; - if (ref instanceof String) { + if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); messageName_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } - + // optional bytes message = 12; public static final int MESSAGE_FIELD_NUMBER = 12; private com.google.protobuf.ByteString message_; + /** + * optional bytes message = 12; + */ public boolean hasMessage() { return ((bitField0_ & 0x00000010) == 0x00000010); } + /** + * optional bytes message = 12; + */ public com.google.protobuf.ByteString getMessage() { return message_; } - + // optional string error_message = 13; public static final int ERROR_MESSAGE_FIELD_NUMBER = 13; private java.lang.Object errorMessage_; + /** + * optional string error_message = 13; + */ public boolean hasErrorMessage() { return ((bitField0_ & 0x00000020) == 0x00000020); } - public String getErrorMessage() { + /** + * optional string error_message = 13; + */ + public java.lang.String getErrorMessage() { java.lang.Object ref = errorMessage_; - if (ref instanceof String) { - return (String) ref; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { errorMessage_ = s; } return s; } } - private com.google.protobuf.ByteString getErrorMessageBytes() { + /** + * optional string error_message = 13; + */ + public com.google.protobuf.ByteString + getErrorMessageBytes() { java.lang.Object ref = errorMessage_; - if (ref instanceof String) { + if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); + com.google.protobuf.ByteString.copyFromUtf8( + 
(java.lang.String) ref); errorMessage_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } - + // repeated .norbert.NorbertMessage.Header header = 14; public static final int HEADER_FIELD_NUMBER = 14; private java.util.List header_; + /** + * repeated .norbert.NorbertMessage.Header header = 14; + */ public java.util.List getHeaderList() { return header_; } + /** + * repeated .norbert.NorbertMessage.Header header = 14; + */ public java.util.List getHeaderOrBuilderList() { return header_; } + /** + * repeated .norbert.NorbertMessage.Header header = 14; + */ public int getHeaderCount() { return header_.size(); } + /** + * repeated .norbert.NorbertMessage.Header header = 14; + */ public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header getHeader(int index) { return header_.get(index); } + /** + * repeated .norbert.NorbertMessage.Header header = 14; + */ public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.HeaderOrBuilder getHeaderOrBuilder( int index) { return header_.get(index); } - + private void initFields() { requestIdMsb_ = 0L; requestIdLsb_ = 0L; @@ -761,7 +1169,7 @@ private void initFields() { public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - + if (!hasRequestIdMsb()) { memoizedIsInitialized = 0; return false; @@ -783,7 +1191,7 @@ public final boolean isInitialized() { memoizedIsInitialized = 1; return true; } - + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); @@ -810,12 +1218,12 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) } getUnknownFields().writeTo(output); } - + private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; - + size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream @@ -849,94 +1257,83 @@ public int getSerializedSize() { memoizedSerializedSize = size; return size; } - + private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } - + public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); + return PARSER.parseFrom(data); } public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); + return PARSER.parseFrom(data, extensionRegistry); } public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); + return PARSER.parseFrom(data); } public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); + return PARSER.parseFrom(data, extensionRegistry); } public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage 
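/*
 * Same migration for the outer NorbertMessage: the overloads below route
 * through PARSER, while call sites keep compiling unchanged, e.g. (sketch,
 * assuming a byte[] `bytes` holding a serialized message):
 *
 *   NorbertProtos.NorbertMessage m = NorbertProtos.NorbertMessage.parseFrom(bytes);
 *
 * The 2.4-era path, newBuilder().mergeFrom(bytes).buildParsed(), goes away.
 */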
parseFrom(java.io.InputStream input) throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); + return PARSER.parseFrom(input); } public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); + return PARSER.parseFrom(input, extensionRegistry); } public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } + return PARSER.parseDelimitedFrom(input); } public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); + return PARSER.parseFrom(input); } public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); + return PARSER.parseFrom(input, extensionRegistry); } - + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(com.linkedin.norbert.protos.NorbertProtos.NorbertMessage prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } - + @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } + /** + * Protobuf type {@code norbert.NorbertMessage} + */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements com.linkedin.norbert.protos.NorbertProtos.NorbertMessageOrBuilder { @@ -944,18 +1341,21 @@ public static final class Builder extends getDescriptor() { return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_NorbertMessage_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_NorbertMessage_fieldAccessorTable; + return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_NorbertMessage_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.class, com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Builder.class); } - + // Construct using com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.newBuilder() private Builder() { maybeForceBuilderInitialization(); } - - private Builder(BuilderParent parent) { + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); 
maybeForceBuilderInitialization(); } @@ -967,7 +1367,7 @@ private void maybeForceBuilderInitialization() { private static Builder create() { return new Builder(); } - + public Builder clear() { super.clear(); requestIdMsb_ = 0L; @@ -990,20 +1390,20 @@ public Builder clear() { } return this; } - + public Builder clone() { return create().mergeFrom(buildPartial()); } - + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.getDescriptor(); + return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_NorbertMessage_descriptor; } - + public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage getDefaultInstanceForType() { return com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.getDefaultInstance(); } - + public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage build() { com.linkedin.norbert.protos.NorbertProtos.NorbertMessage result = buildPartial(); if (!result.isInitialized()) { @@ -1011,17 +1411,7 @@ public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage build() { } return result; } - - private com.linkedin.norbert.protos.NorbertProtos.NorbertMessage buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - com.linkedin.norbert.protos.NorbertProtos.NorbertMessage result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - + public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage buildPartial() { com.linkedin.norbert.protos.NorbertProtos.NorbertMessage result = new com.linkedin.norbert.protos.NorbertProtos.NorbertMessage(this); int from_bitField0_ = bitField0_; @@ -1063,7 +1453,7 @@ public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage buildPartial() { onBuilt(); return result; } - + public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof com.linkedin.norbert.protos.NorbertProtos.NorbertMessage) { return mergeFrom((com.linkedin.norbert.protos.NorbertProtos.NorbertMessage)other); @@ -1072,7 +1462,7 @@ public Builder mergeFrom(com.google.protobuf.Message other) { return this; } } - + public Builder mergeFrom(com.linkedin.norbert.protos.NorbertProtos.NorbertMessage other) { if (other == com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.getDefaultInstance()) return this; if (other.hasRequestIdMsb()) { @@ -1085,13 +1475,17 @@ public Builder mergeFrom(com.linkedin.norbert.protos.NorbertProtos.NorbertMessag setStatus(other.getStatus()); } if (other.hasMessageName()) { - setMessageName(other.getMessageName()); + bitField0_ |= 0x00000008; + messageName_ = other.messageName_; + onChanged(); } if (other.hasMessage()) { setMessage(other.getMessage()); } if (other.hasErrorMessage()) { - setErrorMessage(other.getErrorMessage()); + bitField0_ |= 0x00000020; + errorMessage_ = other.errorMessage_; + onChanged(); } if (headerBuilder_ == null) { if (!other.header_.isEmpty()) { @@ -1102,170 +1496,151 @@ public Builder mergeFrom(com.linkedin.norbert.protos.NorbertProtos.NorbertMessag ensureHeaderIsMutable(); header_.addAll(other.header_); } - onChanged(); - } - } else { - if (!other.header_.isEmpty()) { - if (headerBuilder_.isEmpty()) { - headerBuilder_.dispose(); - headerBuilder_ = null; - header_ = other.header_; - bitField0_ = (bitField0_ & ~0x00000040); - headerBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getHeaderFieldBuilder() : null; - } else { - headerBuilder_.addAllMessages(other.header_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasRequestIdMsb()) { - - return false; - } - if (!hasRequestIdLsb()) { - - return false; - } - if (!hasMessageName()) { - - return false; - } - for (int i = 0; i < getHeaderCount(); i++) { - if (!getHeader(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 9: { - bitField0_ |= 0x00000001; - requestIdMsb_ = input.readSFixed64(); - break; - } - case 17: { - bitField0_ |= 0x00000002; - requestIdLsb_ = input.readSFixed64(); - break; - } - case 80: { - int rawValue = input.readEnum(); - com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Status value = com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Status.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(10, rawValue); - } else { - bitField0_ |= 0x00000004; - status_ = value; - } - break; - } - case 90: { - bitField0_ |= 0x00000008; - messageName_ = input.readBytes(); - break; - } - case 98: { - bitField0_ |= 0x00000010; - message_ = input.readBytes(); - break; - } - case 106: { - bitField0_ |= 0x00000020; - errorMessage_ = input.readBytes(); - break; - } - case 114: { - com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.Builder subBuilder = com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.newBuilder(); - input.readMessage(subBuilder, extensionRegistry); - addHeader(subBuilder.buildPartial()); - break; - } + onChanged(); + } + } else { + if (!other.header_.isEmpty()) { + if (headerBuilder_.isEmpty()) { + headerBuilder_.dispose(); + headerBuilder_ = null; + header_ = other.header_; + bitField0_ = (bitField0_ & ~0x00000040); + headerBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
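/*
 * Most of the -/+ churn in this hunk deletes and re-adds identical
 * mergeFrom(NorbertMessage) and isInitialized() code; the substantive change
 * is that Builder.mergeFrom(CodedInputStream, ExtensionRegistryLite) below
 * drops the hand-written tag switch (cases 9, 17, 80, 90, 98, 106, 114) in
 * favor of PARSER.parsePartialFrom() with e.getUnfinishedMessage() recovery.
 */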
+ getHeaderFieldBuilder() : null; + } else { + headerBuilder_.addAllMessages(other.header_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasRequestIdMsb()) { + + return false; + } + if (!hasRequestIdLsb()) { + + return false; + } + if (!hasMessageName()) { + + return false; + } + for (int i = 0; i < getHeaderCount(); i++) { + if (!getHeader(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.linkedin.norbert.protos.NorbertProtos.NorbertMessage parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.linkedin.norbert.protos.NorbertProtos.NorbertMessage) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); } } + return this; } - private int bitField0_; - + // required sfixed64 request_id_msb = 1; private long requestIdMsb_ ; + /** + * required sfixed64 request_id_msb = 1; + */ public boolean hasRequestIdMsb() { return ((bitField0_ & 0x00000001) == 0x00000001); } + /** + * required sfixed64 request_id_msb = 1; + */ public long getRequestIdMsb() { return requestIdMsb_; } + /** + * required sfixed64 request_id_msb = 1; + */ public Builder setRequestIdMsb(long value) { bitField0_ |= 0x00000001; requestIdMsb_ = value; onChanged(); return this; } + /** + * required sfixed64 request_id_msb = 1; + */ public Builder clearRequestIdMsb() { bitField0_ = (bitField0_ & ~0x00000001); requestIdMsb_ = 0L; onChanged(); return this; } - + // required sfixed64 request_id_lsb = 2; private long requestIdLsb_ ; + /** + * required sfixed64 request_id_lsb = 2; + */ public boolean hasRequestIdLsb() { return ((bitField0_ & 0x00000002) == 0x00000002); } + /** + * required sfixed64 request_id_lsb = 2; + */ public long getRequestIdLsb() { return requestIdLsb_; } + /** + * required sfixed64 request_id_lsb = 2; + */ public Builder setRequestIdLsb(long value) { bitField0_ |= 0x00000002; requestIdLsb_ = value; onChanged(); return this; } + /** + * required sfixed64 request_id_lsb = 2; + */ public Builder clearRequestIdLsb() { bitField0_ = (bitField0_ & ~0x00000002); requestIdLsb_ = 0L; onChanged(); return this; } - + // optional .norbert.NorbertMessage.Status status = 10 [default = OK]; private com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Status status_ = com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Status.OK; + /** + * optional .norbert.NorbertMessage.Status status = 10 [default = OK]; + */ public boolean hasStatus() { return ((bitField0_ & 0x00000004) == 0x00000004); } + /** + * optional .norbert.NorbertMessage.Status status = 10 [default = OK]; + */ public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Status getStatus() { return status_; } + /** + * optional .norbert.NorbertMessage.Status status = 10 [default = OK]; + */ public Builder setStatus(com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Status value) { if (value == null) { throw new NullPointerException(); @@ -1275,29 +1650,59 @@ public Builder setStatus(com.linkedin.norbert.protos.NorbertProtos.NorbertMessag onChanged(); return this; } + /** + * optional .norbert.NorbertMessage.Status status = 10 [default = OK]; + */ public Builder clearStatus() { bitField0_ = 
(bitField0_ & ~0x00000004); status_ = com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Status.OK; onChanged(); return this; } - + // required string message_name = 11; private java.lang.Object messageName_ = ""; + /** + * required string message_name = 11; + */ public boolean hasMessageName() { return ((bitField0_ & 0x00000008) == 0x00000008); } - public String getMessageName() { + /** + * required string message_name = 11; + */ + public java.lang.String getMessageName() { java.lang.Object ref = messageName_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); messageName_ = s; return s; } else { - return (String) ref; + return (java.lang.String) ref; + } + } + /** + * required string message_name = 11; + */ + public com.google.protobuf.ByteString + getMessageNameBytes() { + java.lang.Object ref = messageName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + messageName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; } } - public Builder setMessageName(String value) { + /** + * required string message_name = 11; + */ + public Builder setMessageName( + java.lang.String value) { if (value == null) { throw new NullPointerException(); } @@ -1306,26 +1711,46 @@ public Builder setMessageName(String value) { onChanged(); return this; } + /** + * required string message_name = 11; + */ public Builder clearMessageName() { bitField0_ = (bitField0_ & ~0x00000008); messageName_ = getDefaultInstance().getMessageName(); onChanged(); return this; } - void setMessageName(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000008; + /** + * required string message_name = 11; + */ + public Builder setMessageNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; messageName_ = value; onChanged(); + return this; } - + // optional bytes message = 12; private com.google.protobuf.ByteString message_ = com.google.protobuf.ByteString.EMPTY; + /** + * optional bytes message = 12; + */ public boolean hasMessage() { return ((bitField0_ & 0x00000010) == 0x00000010); } + /** + * optional bytes message = 12; + */ public com.google.protobuf.ByteString getMessage() { return message_; } + /** + * optional bytes message = 12; + */ public Builder setMessage(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); @@ -1335,29 +1760,59 @@ public Builder setMessage(com.google.protobuf.ByteString value) { onChanged(); return this; } + /** + * optional bytes message = 12; + */ public Builder clearMessage() { bitField0_ = (bitField0_ & ~0x00000010); message_ = getDefaultInstance().getMessage(); onChanged(); return this; } - + // optional string error_message = 13; private java.lang.Object errorMessage_ = ""; + /** + * optional string error_message = 13; + */ public boolean hasErrorMessage() { return ((bitField0_ & 0x00000020) == 0x00000020); } - public String getErrorMessage() { + /** + * optional string error_message = 13; + */ + public java.lang.String getErrorMessage() { java.lang.Object ref = errorMessage_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + if (!(ref instanceof java.lang.String)) { + java.lang.String s = 
((com.google.protobuf.ByteString) ref) + .toStringUtf8(); errorMessage_ = s; return s; } else { - return (String) ref; + return (java.lang.String) ref; + } + } + /** + * optional string error_message = 13; + */ + public com.google.protobuf.ByteString + getErrorMessageBytes() { + java.lang.Object ref = errorMessage_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + errorMessage_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; } } - public Builder setErrorMessage(String value) { + /** + * optional string error_message = 13; + */ + public Builder setErrorMessage( + java.lang.String value) { if (value == null) { throw new NullPointerException(); } @@ -1366,18 +1821,29 @@ public Builder setErrorMessage(String value) { onChanged(); return this; } + /** + * optional string error_message = 13; + */ public Builder clearErrorMessage() { bitField0_ = (bitField0_ & ~0x00000020); errorMessage_ = getDefaultInstance().getErrorMessage(); onChanged(); return this; } - void setErrorMessage(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000020; + /** + * optional string error_message = 13; + */ + public Builder setErrorMessageBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000020; errorMessage_ = value; onChanged(); + return this; } - + // repeated .norbert.NorbertMessage.Header header = 14; private java.util.List header_ = java.util.Collections.emptyList(); @@ -1387,10 +1853,13 @@ private void ensureHeaderIsMutable() { bitField0_ |= 0x00000040; } } - + private com.google.protobuf.RepeatedFieldBuilder< com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header, com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.Builder, com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.HeaderOrBuilder> headerBuilder_; - + + /** + * repeated .norbert.NorbertMessage.Header header = 14; + */ public java.util.List getHeaderList() { if (headerBuilder_ == null) { return java.util.Collections.unmodifiableList(header_); @@ -1398,6 +1867,9 @@ public java.util.Listrepeated .norbert.NorbertMessage.Header header = 14; + */ public int getHeaderCount() { if (headerBuilder_ == null) { return header_.size(); @@ -1405,6 +1877,9 @@ public int getHeaderCount() { return headerBuilder_.getCount(); } } + /** + * repeated .norbert.NorbertMessage.Header header = 14; + */ public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header getHeader(int index) { if (headerBuilder_ == null) { return header_.get(index); @@ -1412,6 +1887,9 @@ public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header getHeader return headerBuilder_.getMessage(index); } } + /** + * repeated .norbert.NorbertMessage.Header header = 14; + */ public Builder setHeader( int index, com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header value) { if (headerBuilder_ == null) { @@ -1426,6 +1904,9 @@ public Builder setHeader( } return this; } + /** + * repeated .norbert.NorbertMessage.Header header = 14; + */ public Builder setHeader( int index, com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.Builder builderForValue) { if (headerBuilder_ == null) { @@ -1437,6 +1918,9 @@ public Builder setHeader( } return this; } + /** + * repeated .norbert.NorbertMessage.Header header = 14; + */ public Builder addHeader(com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header value) { if (headerBuilder_ == null) { 
if (value == null) { @@ -1450,6 +1934,9 @@ public Builder addHeader(com.linkedin.norbert.protos.NorbertProtos.NorbertMessag } return this; } + /** + * repeated .norbert.NorbertMessage.Header header = 14; + */ public Builder addHeader( int index, com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header value) { if (headerBuilder_ == null) { @@ -1464,6 +1951,9 @@ public Builder addHeader( } return this; } + /** + * repeated .norbert.NorbertMessage.Header header = 14; + */ public Builder addHeader( com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.Builder builderForValue) { if (headerBuilder_ == null) { @@ -1475,6 +1965,9 @@ public Builder addHeader( } return this; } + /** + * repeated .norbert.NorbertMessage.Header header = 14; + */ public Builder addHeader( int index, com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.Builder builderForValue) { if (headerBuilder_ == null) { @@ -1486,6 +1979,9 @@ public Builder addHeader( } return this; } + /** + * repeated .norbert.NorbertMessage.Header header = 14; + */ public Builder addAllHeader( java.lang.Iterable values) { if (headerBuilder_ == null) { @@ -1497,6 +1993,9 @@ public Builder addAllHeader( } return this; } + /** + * repeated .norbert.NorbertMessage.Header header = 14; + */ public Builder clearHeader() { if (headerBuilder_ == null) { header_ = java.util.Collections.emptyList(); @@ -1507,6 +2006,9 @@ public Builder clearHeader() { } return this; } + /** + * repeated .norbert.NorbertMessage.Header header = 14; + */ public Builder removeHeader(int index) { if (headerBuilder_ == null) { ensureHeaderIsMutable(); @@ -1517,10 +2019,16 @@ public Builder removeHeader(int index) { } return this; } + /** + * repeated .norbert.NorbertMessage.Header header = 14; + */ public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.Builder getHeaderBuilder( int index) { return getHeaderFieldBuilder().getBuilder(index); } + /** + * repeated .norbert.NorbertMessage.Header header = 14; + */ public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.HeaderOrBuilder getHeaderOrBuilder( int index) { if (headerBuilder_ == null) { @@ -1528,6 +2036,9 @@ public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.HeaderOrBuilder return headerBuilder_.getMessageOrBuilder(index); } } + /** + * repeated .norbert.NorbertMessage.Header header = 14; + */ public java.util.List getHeaderOrBuilderList() { if (headerBuilder_ != null) { @@ -1536,15 +2047,24 @@ public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.HeaderOrBuilder return java.util.Collections.unmodifiableList(header_); } } + /** + * repeated .norbert.NorbertMessage.Header header = 14; + */ public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.Builder addHeaderBuilder() { return getHeaderFieldBuilder().addBuilder( com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.getDefaultInstance()); } + /** + * repeated .norbert.NorbertMessage.Header header = 14; + */ public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.Builder addHeaderBuilder( int index) { return getHeaderFieldBuilder().addBuilder( index, com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.getDefaultInstance()); } + /** + * repeated .norbert.NorbertMessage.Header header = 14; + */ public java.util.List getHeaderBuilderList() { return getHeaderFieldBuilder().getBuilderList(); @@ -1563,144 +2083,309 @@ public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.Builder a } return headerBuilder_; } - + // 
@@protoc_insertion_point(builder_scope:norbert.NorbertMessage) } - + static { defaultInstance = new NorbertMessage(true); defaultInstance.initFields(); } - + // @@protoc_insertion_point(class_scope:norbert.NorbertMessage) } - + public interface NodeOrBuilder extends com.google.protobuf.MessageOrBuilder { - + // required int32 id = 1; + /** + * required int32 id = 1; + */ boolean hasId(); + /** + * required int32 id = 1; + */ int getId(); - + // required string url = 2; + /** + * required string url = 2; + */ boolean hasUrl(); - String getUrl(); - + /** + * required string url = 2; + */ + java.lang.String getUrl(); + /** + * required string url = 2; + */ + com.google.protobuf.ByteString + getUrlBytes(); + // repeated int32 partition = 3; + /** + * repeated int32 partition = 3; + */ java.util.List getPartitionList(); + /** + * repeated int32 partition = 3; + */ int getPartitionCount(); + /** + * repeated int32 partition = 3; + */ int getPartition(int index); - + // optional int64 persistentCapability = 4; + /** + * optional int64 persistentCapability = 4; + */ boolean hasPersistentCapability(); + /** + * optional int64 persistentCapability = 4; + */ long getPersistentCapability(); } + /** + * Protobuf type {@code norbert.Node} + */ public static final class Node extends com.google.protobuf.GeneratedMessage implements NodeOrBuilder { // Use Node.newBuilder() to construct. - private Node(Builder builder) { + private Node(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private Node(boolean noInit) {} - + private Node(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private static final Node defaultInstance; public static Node getDefaultInstance() { return defaultInstance; } - + public Node getDefaultInstanceForType() { return defaultInstance; } - + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Node( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + id_ = input.readInt32(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + url_ = input.readBytes(); + break; + } + case 24: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + partition_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + partition_.add(input.readInt32()); + break; + } + case 26: { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004) && input.getBytesUntilLimit() > 0) { + partition_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + while (input.getBytesUntilLimit() > 0) { + partition_.add(input.readInt32()); + } + input.popLimit(limit); + break; + } + case 32: { + bitField0_ |= 0x00000004; + persistentCapability_ = input.readInt64(); + break; + } 
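/*
 * Wire parsing now happens in this Node(CodedInputStream,
 * ExtensionRegistryLite) constructor: unknown fields are captured into
 * this.unknownFields, and the repeated `partition` field accepts both the
 * unpacked (tag 24) and packed (tag 26) encodings; the packed branch pushes
 * a length limit and reads int32 values until getBytesUntilLimit() is zero.
 */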
+ } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + partition_ = java.util.Collections.unmodifiableList(partition_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_Node_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_Node_fieldAccessorTable; + return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_Node_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.linkedin.norbert.protos.NorbertProtos.Node.class, com.linkedin.norbert.protos.NorbertProtos.Node.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Node parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Node(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; } - + private int bitField0_; // required int32 id = 1; public static final int ID_FIELD_NUMBER = 1; private int id_; + /** + * required int32 id = 1; + */ public boolean hasId() { return ((bitField0_ & 0x00000001) == 0x00000001); } + /** + * required int32 id = 1; + */ public int getId() { return id_; } - + // required string url = 2; public static final int URL_FIELD_NUMBER = 2; private java.lang.Object url_; + /** + * required string url = 2; + */ public boolean hasUrl() { return ((bitField0_ & 0x00000002) == 0x00000002); } - public String getUrl() { + /** + * required string url = 2; + */ + public java.lang.String getUrl() { java.lang.Object ref = url_; - if (ref instanceof String) { - return (String) ref; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { url_ = s; } return s; } } - private com.google.protobuf.ByteString getUrlBytes() { + /** + * required string url = 2; + */ + public com.google.protobuf.ByteString + getUrlBytes() { java.lang.Object ref = url_; - if (ref instanceof String) { + if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); url_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } - + // repeated int32 partition = 3; public static final int PARTITION_FIELD_NUMBER = 3; private java.util.List partition_; + /** + * repeated int32 partition = 3; + */ public java.util.List getPartitionList() { return partition_; } + /** + * repeated int32 partition = 3; + */ public int getPartitionCount() { return partition_.size(); } + /** + * repeated int32 partition = 3; + */ public int getPartition(int index) 
{ return partition_.get(index); } - + // optional int64 persistentCapability = 4; public static final int PERSISTENTCAPABILITY_FIELD_NUMBER = 4; private long persistentCapability_; + /** + * optional int64 persistentCapability = 4; + */ public boolean hasPersistentCapability() { return ((bitField0_ & 0x00000004) == 0x00000004); } + /** + * optional int64 persistentCapability = 4; + */ public long getPersistentCapability() { return persistentCapability_; } - + private void initFields() { id_ = 0; url_ = ""; - partition_ = java.util.Collections.emptyList();; + partition_ = java.util.Collections.emptyList(); persistentCapability_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - + if (!hasId()) { memoizedIsInitialized = 0; return false; @@ -1712,7 +2397,7 @@ public final boolean isInitialized() { memoizedIsInitialized = 1; return true; } - + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); @@ -1730,12 +2415,12 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) } getUnknownFields().writeTo(output); } - + private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; - + size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream @@ -1762,94 +2447,83 @@ public int getSerializedSize() { memoizedSerializedSize = size; return size; } - + private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } - + public static com.linkedin.norbert.protos.NorbertProtos.Node parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); + return PARSER.parseFrom(data); } public static com.linkedin.norbert.protos.NorbertProtos.Node parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); + return PARSER.parseFrom(data, extensionRegistry); } public static com.linkedin.norbert.protos.NorbertProtos.Node parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); + return PARSER.parseFrom(data); } public static com.linkedin.norbert.protos.NorbertProtos.Node parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); + return PARSER.parseFrom(data, extensionRegistry); } public static com.linkedin.norbert.protos.NorbertProtos.Node parseFrom(java.io.InputStream input) throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); + return PARSER.parseFrom(input); } public static com.linkedin.norbert.protos.NorbertProtos.Node parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); + return PARSER.parseFrom(input, extensionRegistry); } public static 
com.linkedin.norbert.protos.NorbertProtos.Node parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } + return PARSER.parseDelimitedFrom(input); } public static com.linkedin.norbert.protos.NorbertProtos.Node parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static com.linkedin.norbert.protos.NorbertProtos.Node parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); + return PARSER.parseFrom(input); } public static com.linkedin.norbert.protos.NorbertProtos.Node parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); + return PARSER.parseFrom(input, extensionRegistry); } - + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(com.linkedin.norbert.protos.NorbertProtos.Node prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } - + @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } + /** + * Protobuf type {@code norbert.Node} + */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder implements com.linkedin.norbert.protos.NorbertProtos.NodeOrBuilder { @@ -1857,18 +2531,21 @@ public static final class Builder extends getDescriptor() { return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_Node_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_Node_fieldAccessorTable; + return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_Node_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.linkedin.norbert.protos.NorbertProtos.Node.class, com.linkedin.norbert.protos.NorbertProtos.Node.Builder.class); } - + // Construct using com.linkedin.norbert.protos.NorbertProtos.Node.newBuilder() private Builder() { maybeForceBuilderInitialization(); } - - private Builder(BuilderParent parent) { + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -1879,33 +2556,33 @@ private void maybeForceBuilderInitialization() { private static Builder create() { return new Builder(); } - + public Builder clear() { super.clear(); id_ = 0; bitField0_ = (bitField0_ & ~0x00000001); url_ = ""; bitField0_ = (bitField0_ & ~0x00000002); - partition_ = java.util.Collections.emptyList();; + partition_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000004); persistentCapability_ = 0L; bitField0_ = (bitField0_ & ~0x00000008); return this; } - + public Builder clone() { return create().mergeFrom(buildPartial()); } - + public 
com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return com.linkedin.norbert.protos.NorbertProtos.Node.getDescriptor(); + return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_Node_descriptor; } - + public com.linkedin.norbert.protos.NorbertProtos.Node getDefaultInstanceForType() { return com.linkedin.norbert.protos.NorbertProtos.Node.getDefaultInstance(); } - + public com.linkedin.norbert.protos.NorbertProtos.Node build() { com.linkedin.norbert.protos.NorbertProtos.Node result = buildPartial(); if (!result.isInitialized()) { @@ -1913,17 +2590,7 @@ public com.linkedin.norbert.protos.NorbertProtos.Node build() { } return result; } - - private com.linkedin.norbert.protos.NorbertProtos.Node buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - com.linkedin.norbert.protos.NorbertProtos.Node result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - + public com.linkedin.norbert.protos.NorbertProtos.Node buildPartial() { com.linkedin.norbert.protos.NorbertProtos.Node result = new com.linkedin.norbert.protos.NorbertProtos.Node(this); int from_bitField0_ = bitField0_; @@ -1949,7 +2616,7 @@ public com.linkedin.norbert.protos.NorbertProtos.Node buildPartial() { onBuilt(); return result; } - + public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof com.linkedin.norbert.protos.NorbertProtos.Node) { return mergeFrom((com.linkedin.norbert.protos.NorbertProtos.Node)other); @@ -1958,14 +2625,16 @@ public Builder mergeFrom(com.google.protobuf.Message other) { return this; } } - + public Builder mergeFrom(com.linkedin.norbert.protos.NorbertProtos.Node other) { if (other == com.linkedin.norbert.protos.NorbertProtos.Node.getDefaultInstance()) return this; if (other.hasId()) { setId(other.getId()); } if (other.hasUrl()) { - setUrl(other.getUrl()); + bitField0_ |= 0x00000002; + url_ = other.url_; + onChanged(); } if (!other.partition_.isEmpty()) { if (partition_.isEmpty()) { @@ -1983,7 +2652,7 @@ public Builder mergeFrom(com.linkedin.norbert.protos.NorbertProtos.Node other) { this.mergeUnknownFields(other.getUnknownFields()); return this; } - + public final boolean isInitialized() { if (!hasId()) { @@ -1995,102 +2664,102 @@ public final boolean isInitialized() { } return true; } - + public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - id_ = input.readInt32(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - url_ = input.readBytes(); - break; - } - case 24: { - ensurePartitionIsMutable(); - partition_.add(input.readInt32()); - break; - } - case 26: { - int length = input.readRawVarint32(); - int limit = input.pushLimit(length); - while (input.getBytesUntilLimit() > 0) { - addPartition(input.readInt32()); - } - input.popLimit(limit); - break; - } - case 32: { - bitField0_ |= 0x00000008; - 
persistentCapability_ = input.readInt64(); - break; - } + com.linkedin.norbert.protos.NorbertProtos.Node parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (com.linkedin.norbert.protos.NorbertProtos.Node) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); } } + return this; } - private int bitField0_; - + // required int32 id = 1; private int id_ ; + /** + * required int32 id = 1; + */ public boolean hasId() { return ((bitField0_ & 0x00000001) == 0x00000001); } + /** + * required int32 id = 1; + */ public int getId() { return id_; } + /** + * required int32 id = 1; + */ public Builder setId(int value) { bitField0_ |= 0x00000001; id_ = value; onChanged(); return this; } + /** + * required int32 id = 1; + */ public Builder clearId() { bitField0_ = (bitField0_ & ~0x00000001); id_ = 0; onChanged(); return this; } - + // required string url = 2; private java.lang.Object url_ = ""; + /** + * required string url = 2; + */ public boolean hasUrl() { return ((bitField0_ & 0x00000002) == 0x00000002); } - public String getUrl() { + /** + * required string url = 2; + */ + public java.lang.String getUrl() { java.lang.Object ref = url_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); url_ = s; return s; } else { - return (String) ref; + return (java.lang.String) ref; + } + } + /** + * required string url = 2; + */ + public com.google.protobuf.ByteString + getUrlBytes() { + java.lang.Object ref = url_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + url_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; } } - public Builder setUrl(String value) { + /** + * required string url = 2; + */ + public Builder setUrl( + java.lang.String value) { if (value == null) { throw new NullPointerException(); } @@ -2099,36 +2768,59 @@ public Builder setUrl(String value) { onChanged(); return this; } + /** + * required string url = 2; + */ public Builder clearUrl() { bitField0_ = (bitField0_ & ~0x00000002); url_ = getDefaultInstance().getUrl(); onChanged(); return this; } - void setUrl(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000002; + /** + * required string url = 2; + */ + public Builder setUrlBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; url_ = value; onChanged(); + return this; } - + // repeated int32 partition = 3; - private java.util.List partition_ = java.util.Collections.emptyList();; + private java.util.List partition_ = java.util.Collections.emptyList(); private void ensurePartitionIsMutable() { if (!((bitField0_ & 0x00000004) == 0x00000004)) { partition_ = new java.util.ArrayList(partition_); bitField0_ |= 0x00000004; } } + /** + * repeated int32 partition = 3; + */ public java.util.List getPartitionList() { return java.util.Collections.unmodifiableList(partition_); } + /** + * repeated int32 partition = 3; + */ public int getPartitionCount() { return partition_.size(); } + /** + * repeated int32 partition = 3; + */ public int getPartition(int index) { return partition_.get(index); } + /** + * repeated int32 partition = 
3; + */ public Builder setPartition( int index, int value) { ensurePartitionIsMutable(); @@ -2136,12 +2828,18 @@ public Builder setPartition( onChanged(); return this; } + /** + * repeated int32 partition = 3; + */ public Builder addPartition(int value) { ensurePartitionIsMutable(); partition_.add(value); onChanged(); return this; } + /** + * repeated int32 partition = 3; + */ public Builder addAllPartition( java.lang.Iterable values) { ensurePartitionIsMutable(); @@ -2149,45 +2847,60 @@ public Builder addAllPartition( onChanged(); return this; } + /** + * repeated int32 partition = 3; + */ public Builder clearPartition() { - partition_ = java.util.Collections.emptyList();; + partition_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000004); onChanged(); return this; } - + // optional int64 persistentCapability = 4; private long persistentCapability_ ; + /** + * optional int64 persistentCapability = 4; + */ public boolean hasPersistentCapability() { return ((bitField0_ & 0x00000008) == 0x00000008); } + /** + * optional int64 persistentCapability = 4; + */ public long getPersistentCapability() { return persistentCapability_; } + /** + * optional int64 persistentCapability = 4; + */ public Builder setPersistentCapability(long value) { bitField0_ |= 0x00000008; persistentCapability_ = value; onChanged(); return this; } + /** + * optional int64 persistentCapability = 4; + */ public Builder clearPersistentCapability() { bitField0_ = (bitField0_ & ~0x00000008); persistentCapability_ = 0L; onChanged(); return this; } - + // @@protoc_insertion_point(builder_scope:norbert.Node) } - + static { defaultInstance = new Node(true); defaultInstance.initFields(); } - + // @@protoc_insertion_point(class_scope:norbert.Node) } - + private static com.google.protobuf.Descriptors.Descriptor internal_static_norbert_NorbertMessage_descriptor; private static @@ -2203,7 +2916,7 @@ public Builder clearPersistentCapability() { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_norbert_Node_fieldAccessorTable; - + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; @@ -2212,18 +2925,18 @@ public Builder clearPersistentCapability() { descriptor; static { java.lang.String[] descriptorData = { - "\n\rnorbert.proto\022\007norbert\"\264\002\n\016NorbertMess" + - "age\022\026\n\016request_id_msb\030\001 \002(\020\022\026\n\016request_i" + - "d_lsb\030\002 \002(\020\0222\n\006status\030\n \001(\0162\036.norbert.No" + - "rbertMessage.Status:\002OK\022\024\n\014message_name\030" + - "\013 \002(\t\022\017\n\007message\030\014 \001(\014\022\025\n\rerror_message\030" + - "\r \001(\t\022.\n\006header\030\016 \003(\0132\036.norbert.NorbertM" + - "essage.Header\032$\n\006Header\022\013\n\003key\030\001 \002(\t\022\r\n\005" + - "value\030\002 \001(\t\"*\n\006Status\022\006\n\002OK\020\000\022\t\n\005ERROR\020\001" + - "\022\r\n\tHEAVYLOAD\020\002\"P\n\004Node\022\n\n\002id\030\001 \002(\005\022\013\n\003u" + - "rl\030\002 \002(\t\022\021\n\tpartition\030\003 \003(\005\022\034\n\024persisten", - "tCapability\030\004 \001(\003B.\n\033com.linkedin.norber" + - "t.protosB\rNorbertProtosH\001" + "\n\026protobuf/norbert.proto\022\007norbert\"\264\002\n\016No" + + "rbertMessage\022\026\n\016request_id_msb\030\001 \002(\020\022\026\n\016" + + "request_id_lsb\030\002 \002(\020\0222\n\006status\030\n \001(\0162\036.n" + + "orbert.NorbertMessage.Status:\002OK\022\024\n\014mess" + + "age_name\030\013 \002(\t\022\017\n\007message\030\014 
\001(\014\022\025\n\rerror" + + "_message\030\r \001(\t\022.\n\006header\030\016 \003(\0132\036.norbert" + + ".NorbertMessage.Header\032$\n\006Header\022\013\n\003key\030" + + "\001 \002(\t\022\r\n\005value\030\002 \001(\t\"*\n\006Status\022\006\n\002OK\020\000\022\t" + + "\n\005ERROR\020\001\022\r\n\tHEAVYLOAD\020\002\"P\n\004Node\022\n\n\002id\030\001" + + " \002(\005\022\013\n\003url\030\002 \002(\t\022\021\n\tpartition\030\003 \003(\005\022\034\n\024", + "persistentCapability\030\004 \001(\003B.\n\033com.linked" + + "in.norbert.protosB\rNorbertProtosH\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -2235,25 +2948,19 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( internal_static_norbert_NorbertMessage_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_norbert_NorbertMessage_descriptor, - new java.lang.String[] { "RequestIdMsb", "RequestIdLsb", "Status", "MessageName", "Message", "ErrorMessage", "Header", }, - com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.class, - com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Builder.class); + new java.lang.String[] { "RequestIdMsb", "RequestIdLsb", "Status", "MessageName", "Message", "ErrorMessage", "Header", }); internal_static_norbert_NorbertMessage_Header_descriptor = internal_static_norbert_NorbertMessage_descriptor.getNestedTypes().get(0); internal_static_norbert_NorbertMessage_Header_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_norbert_NorbertMessage_Header_descriptor, - new java.lang.String[] { "Key", "Value", }, - com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.class, - com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.Builder.class); + new java.lang.String[] { "Key", "Value", }); internal_static_norbert_Node_descriptor = getDescriptor().getMessageTypes().get(1); internal_static_norbert_Node_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_norbert_Node_descriptor, - new java.lang.String[] { "Id", "Url", "Partition", "PersistentCapability", }, - com.linkedin.norbert.protos.NorbertProtos.Node.class, - com.linkedin.norbert.protos.NorbertProtos.Node.Builder.class); + new java.lang.String[] { "Id", "Url", "Partition", "PersistentCapability", }); return null; } }; @@ -2262,6 +2969,6 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( new com.google.protobuf.Descriptors.FileDescriptor[] { }, assigner); } - + // @@protoc_insertion_point(outer_class_scope) } diff --git a/network/src/main/java/com/google/protobuf/ByteStringUtils.java b/network/src/main/java/com/google/protobuf/ByteStringUtils.java new file mode 100644 index 00000000..77666a2a --- /dev/null +++ b/network/src/main/java/com/google/protobuf/ByteStringUtils.java @@ -0,0 +1,14 @@ +package com.google.protobuf; + +/** + * A small utility to avoid byte copying of arrays by protocol buffers. + * + * Trades safety/security for performance. 
+ *
+ */
+public class ByteStringUtils {
+
+  public static ByteString wrap(byte[] buffer) {
+    return new LiteralByteString(buffer);
+  }
+}
diff --git a/network/src/main/scala/com/linkedin/norbert/network/util/ProtoUtils.scala b/network/src/main/scala/com/linkedin/norbert/network/util/ProtoUtils.scala
index 3148af81..ce595bab 100644
--- a/network/src/main/scala/com/linkedin/norbert/network/util/ProtoUtils.scala
+++ b/network/src/main/scala/com/linkedin/norbert/network/util/ProtoUtils.scala
@@ -2,7 +2,7 @@ package com.linkedin.norbert
 package network
 package util
 
-import com.google.protobuf.ByteString
+import com.google.protobuf.{ByteStringUtils, ByteString, LiteralByteString}
 import logging.Logging
 import java.lang.reflect.{Field, Constructor}
 
@@ -12,16 +12,6 @@ import java.lang.reflect.{Field, Constructor}
  * to bypass those.
  */
 object ProtoUtils extends Logging {
-  private val byteStringConstructor: Constructor[ByteString] = try {
-    val c = classOf[ByteString].getDeclaredConstructor(classOf[Array[Byte]])
-    c.setAccessible(true)
-    c
-  } catch {
-    case ex: Exception =>
-      log.warn(ex, "Cannot eliminate a copy when converting a byte[] to a ByteString")
-      null
-  }
-
   private val byteStringField: Field = try {
     val f = classOf[ByteString].getDeclaredField("bytes")
     f.setAccessible(true)
@@ -47,16 +37,7 @@ object ProtoUtils extends Logging {
   }
 
   private final def fastByteArrayToByteString(byteArray: Array[Byte]): ByteString = {
-    if(byteStringConstructor != null)
-      try {
-        byteStringConstructor.newInstance(byteArray)
-      } catch {
-        case ex: Exception =>
-          log.warn(ex, "Encountered exception invoking the private ByteString constructor, falling back to safe method")
-          slowByteArrayToByteString(byteArray)
-      }
-    else
-      slowByteArrayToByteString(byteArray)
+    ByteStringUtils.wrap(byteArray)
   }
 
   private final def slowByteArrayToByteString(byteArray: Array[Byte]): ByteString = {

From a2fde76115fb963c3de4d8571c0c76f67c085439 Mon Sep 17 00:00:00 2001
From: Nick Hristov
Date: Wed, 22 Oct 2014 06:08:20 -0700
Subject: [PATCH 06/20] Restored version; it had been reset by accident.

---
 gradle.properties | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/gradle.properties b/gradle.properties
index 221edcf6..2a70dfe7 100644
--- a/gradle.properties
+++ b/gradle.properties
@@ -1,4 +1,4 @@
-version=0.6.62
+version=0.6.82
 defaultScalaVersion=2.10.3
 targetScalaVersions=2.10.3
 crossBuild=false

From 52863ab3f51c26a47d4849f1dbc5efe038632877 Mon Sep 17 00:00:00 2001
From: Nick Hristov
Date: Wed, 22 Oct 2014 06:08:52 -0700
Subject: [PATCH 07/20] Adding maven artifact publishing support

---
 build.gradle | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/build.gradle b/build.gradle
index d043557d..f8ad6284 100644
--- a/build.gradle
+++ b/build.gradle
@@ -14,6 +14,9 @@ def getScalaSuffix(scalaVersion) {
 }
 
 subprojects {
+  apply plugin: 'maven'
+
+  group = 'com.linkedin.norbert'
   // the cross built scala modules share the same source directories so we need to make their output directories unique
   buildDir = "${rootProject.buildDir}/$name"

From 95385e01b7179db540e405e2a244d3227fcc2e31 Mon Sep 17 00:00:00 2001
From: Nick Hristov
Date: Fri, 24 Oct 2014 11:56:38 -0700
Subject: [PATCH 08/20] Add the capability to fail over a request to a different node when the originally chosen node goes down (and we do not notice immediately due to ZooKeeper delays).
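The core addition is a nextNodes load-balancer method that returns every capable replica for a PartitionedId as an insertion-ordered set, together with a PartitionedNetworkClientFailOver trait that retries a request on the next replica when the connection to the first one fails. A minimal sketch of the iteration contract, written against the Node model already in the tree; the trySend transport hook is an illustrative stand-in, not part of this change:

    import java.util

    import com.linkedin.norbert.cluster.Node

    object NextNodesSketch {
      // Illustrative stand-in for an actual request attempt over the wire.
      def trySend(node: Node, payload: String): Boolean = node.available

      // Iteration order of the LinkedHashSet is the failover order: the
      // first entry is the primary choice, later entries are the replicas
      // to fall back on when a connection-level failure occurs.
      def sendWithFailOver(candidates: util.LinkedHashSet[Node], payload: String): Boolean = {
        val it = candidates.iterator()
        while (it.hasNext) {
          if (trySend(it.next(), payload)) return true // first healthy replica wins
        }
        false // no replica accepted the request
      }
    }

The PartitionedNetworkClientFailOver.sendRequest implementation below does this for the first two entries: it sends to the head of the set and, on a connection failure, re-issues the request against the second entry.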
---
 ...ConsistentHashPartitionedLoadBalancer.java | 27 +++++---
 .../network/PartitionedLoadBalancer.java | 39 ++++++++++-
 .../RingHashPartitionedLoadBalancer.java | 20 +++++-
 .../network/JavaLbfToScalaLbf.scala | 28 ++++++--
 .../network/ScalaLbfToJavaLbf.scala | 34 ++++++++--
 .../network/netty/NettyNetworkClient.scala | 7 +-
 .../PartitionedNetworkClient.scala | 65 ++++++++++++++++---
 .../DefaultClusteredLoadBalancerFactory.scala | 18 ++++-
 .../DefaultLoadBalancerHelper.scala | 38 +++++++++--
 ...efaultPartitionedLoadBalancerFactory.scala | 22 +++++--
 ...dConsistentHashedLoadBalancerFactory.scala | 35 ++++++++--
 .../PartitionedLoadBalancerFactory.scala | 16 ++++-
 ...eConsistentHashedLoadBalancerFactory.scala | 8 ++-
 .../PartitionedNetworkClientSpec.scala | 20 ++++--
 ...onedConsistentHashedLoadBalancerSpec.scala | 44 ++++++++++---
 15 files changed, 347 insertions(+), 74 deletions(-)

diff --git a/java-network/src/main/java/com/linkedin/norbert/javacompat/network/ConsistentHashPartitionedLoadBalancer.java b/java-network/src/main/java/com/linkedin/norbert/javacompat/network/ConsistentHashPartitionedLoadBalancer.java
index 97f1e178..70f1338a 100644
--- a/java-network/src/main/java/com/linkedin/norbert/javacompat/network/ConsistentHashPartitionedLoadBalancer.java
+++ b/java-network/src/main/java/com/linkedin/norbert/javacompat/network/ConsistentHashPartitionedLoadBalancer.java
@@ -1,15 +1,9 @@
 package com.linkedin.norbert.javacompat.network;
 
-import java.util.AbstractMap;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.NavigableMap;
-import java.util.Set;
-import java.util.TreeMap;
-
 import com.linkedin.norbert.javacompat.cluster.Node;
 
+import java.util.*;
+
 public class ConsistentHashPartitionedLoadBalancer
   implements PartitionedLoadBalancer
@@ -133,7 +127,22 @@ public Boolean apply(Endpoint key) {
     return endpoint.getNode();
   }
 
-  @Override
+  @Override
+  public LinkedHashSet<Node> nextNodes(PartitionedId partitionedId) {
+    return null;
+  }
+
+  @Override
+  public LinkedHashSet<Node> nextNodes(PartitionedId partitionedId, Long capability) {
+    return null;
+  }
+
+  @Override
+  public LinkedHashSet<Node> nextNodes(PartitionedId partitionedId, Long capability, Long persistentCapability) {
+    return null;
+  }
+
+  @Override
   public Set<Node> nodesForPartitionedId(PartitionedId partitionedId) {
     return nodesForPartitionedId(partitionedId, 0L, 0L);
   }
diff --git a/java-network/src/main/java/com/linkedin/norbert/javacompat/network/PartitionedLoadBalancer.java b/java-network/src/main/java/com/linkedin/norbert/javacompat/network/PartitionedLoadBalancer.java
index d3fd718a..92a10cce 100644
--- a/java-network/src/main/java/com/linkedin/norbert/javacompat/network/PartitionedLoadBalancer.java
+++ b/java-network/src/main/java/com/linkedin/norbert/javacompat/network/PartitionedLoadBalancer.java
@@ -15,10 +15,11 @@
  */
 package com.linkedin.norbert.javacompat.network;
 
+import com.linkedin.norbert.javacompat.cluster.Node;
+
+import java.util.LinkedHashSet;
 import java.util.Map;
 import java.util.Set;
-import java.lang.Long;
-import com.linkedin.norbert.javacompat.cluster.Node;
 
 /**
  * A PartitionedLoadBalancer handles calculating the next Node a message should be routed to
  * based on a PartitionedId.
@@ -59,7 +60,39 @@ public interface PartitionedLoadBalancer {
   */
  Node nextNode(PartitionedId id, Long capability, Long persistentCapability);
 
- /**
+
+ /**
+  * Returns the ordered set of Nodes a message should be routed to, based on the PartitionedId provided.
+  *
+  * @param id the id to be used to calculate partitioning information.
+  *
+  * @return the ordered set of candidate Nodes, in failover order
+  */
+ LinkedHashSet<Node> nextNodes(PartitionedId id);
+
+ /**
+  * Returns the ordered set of Nodes a message should be routed to, based on the PartitionedId provided.
+  *
+  * @param id the id to be used to calculate partitioning information.
+  * @param capability the minimal capability required by client
+  *
+  * @return the ordered set of candidate Nodes, in failover order
+  */
+ LinkedHashSet<Node> nextNodes(PartitionedId id, Long capability);
+
+ /**
+  * Returns the ordered set of Nodes a message should be routed to, based on the PartitionedId provided.
+  *
+  * @param id the id to be used to calculate partitioning information.
+  * @param capability the minimal capability required by client
+  * @param persistentCapability the capability of more persistent nature
+  *
+  * @return the ordered set of candidate Nodes, in failover order
+  */
+ LinkedHashSet<Node> nextNodes(PartitionedId id, Long capability, Long persistentCapability);
+
+
+ /**
   * Returns all replica nodes for the same partitionedId
   * @return the Nodes to multicast the next messages to each replica
   */
diff --git a/java-network/src/main/java/com/linkedin/norbert/javacompat/network/RingHashPartitionedLoadBalancer.java b/java-network/src/main/java/com/linkedin/norbert/javacompat/network/RingHashPartitionedLoadBalancer.java
index f1a74cd4..1fc8d76b 100644
--- a/java-network/src/main/java/com/linkedin/norbert/javacompat/network/RingHashPartitionedLoadBalancer.java
+++ b/java-network/src/main/java/com/linkedin/norbert/javacompat/network/RingHashPartitionedLoadBalancer.java
@@ -18,6 +18,7 @@
 import com.linkedin.norbert.javacompat.cluster.Node;
 import org.apache.log4j.Logger;
 
+import java.util.LinkedHashSet;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
@@ -83,8 +84,23 @@ public Node nextNode(Integer partitionedId, Long capability, Long persistentCapa
     log.warn("All endpoints seem unavailable!
Using the default"); return firstEndpoint.getNode(); } - - @Override + + @Override + public LinkedHashSet nextNodes(Integer integer) { + return null; + } + + @Override + public LinkedHashSet nextNodes(Integer integer, Long capability) { + return null; + } + + @Override + public LinkedHashSet nextNodes(Integer integer, Long capability, Long persistentCapability) { + return null; + } + + @Override public Set nodesForPartitionedId(Integer partitionedId) { throw new UnsupportedOperationException(); } diff --git a/java-network/src/main/scala/com/linkedin/norbert/javacompat/network/JavaLbfToScalaLbf.scala b/java-network/src/main/scala/com/linkedin/norbert/javacompat/network/JavaLbfToScalaLbf.scala index 588a6839..98d9256b 100644 --- a/java-network/src/main/scala/com/linkedin/norbert/javacompat/network/JavaLbfToScalaLbf.scala +++ b/java-network/src/main/scala/com/linkedin/norbert/javacompat/network/JavaLbfToScalaLbf.scala @@ -2,13 +2,13 @@ package com.linkedin.norbert package javacompat package network -import com.linkedin.norbert.network.partitioned.loadbalancer.{PartitionedLoadBalancerFactory => SPartitionedLoadBalancerFactory, PartitionedLoadBalancer => SPartitionedLoadBalancer} -import com.linkedin.norbert.network.client.loadbalancer.{LoadBalancerFactory => SLoadBalancerFactory, LoadBalancer => SLoadBalancer} +import java.util +import com.linkedin.norbert.EndpointConversions._ import com.linkedin.norbert.cluster.{Node => SNode} +import com.linkedin.norbert.javacompat.cluster.Node import com.linkedin.norbert.network.common.{Endpoint => SEndpoint} - -import com.linkedin.norbert.EndpointConversions._ +import com.linkedin.norbert.network.partitioned.loadbalancer.{PartitionedLoadBalancer => SPartitionedLoadBalancer, PartitionedLoadBalancerFactory => SPartitionedLoadBalancerFactory} class JavaLbfToScalaLbf[PartitionedId](javaLbf: PartitionedLoadBalancerFactory[PartitionedId]) extends SPartitionedLoadBalancerFactory[PartitionedId] { def newLoadBalancer(nodes: Set[SEndpoint]) = { @@ -78,7 +78,25 @@ class JavaLbfToScalaLbf[PartitionedId](javaLbf: PartitionedLoadBalancerFactory[P } sMap } - } + + def rewrap(nodes: util.LinkedHashSet[com.linkedin.norbert.javacompat.cluster.Node]): util.LinkedHashSet[SNode] = { + val result = new util.LinkedHashSet[SNode]() + val it = nodes.iterator() + while(it.hasNext) { + val node:Node = it.next() + result.add(new SNode(node.id, node.url, node.available, node.partitionIds, node.capability, node.persistentCapability)) + } + result + } + override def nextNodes(id: PartitionedId, capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[SNode] = { + (capability, persistentCapability) match { + case (Some(c),Some(pc)) => rewrap(lb.nextNodes(id, c.longValue, pc.longValue)) + case (None, Some(pc)) => rewrap(lb.nextNodes(id, 0L, pc.longValue)) + case (Some(c), None) => rewrap(lb.nextNodes(id, c.longValue, 0L)) + case (None, None) => rewrap(lb.nextNodes(id)) + } + } + } } diff --git a/java-network/src/main/scala/com/linkedin/norbert/javacompat/network/ScalaLbfToJavaLbf.scala b/java-network/src/main/scala/com/linkedin/norbert/javacompat/network/ScalaLbfToJavaLbf.scala index 6f821553..3d62221c 100644 --- a/java-network/src/main/scala/com/linkedin/norbert/javacompat/network/ScalaLbfToJavaLbf.scala +++ b/java-network/src/main/scala/com/linkedin/norbert/javacompat/network/ScalaLbfToJavaLbf.scala @@ -2,10 +2,12 @@ package com.linkedin.norbert package javacompat package network -import 
com.linkedin.norbert.network.partitioned.loadbalancer.{PartitionedLoadBalancerFactory => SPartitionedLoadBalancerFactory} +import java.{lang, util} + import com.linkedin.norbert.EndpointConversions._ -import javacompat.cluster.Node -import javacompat._ +import com.linkedin.norbert.javacompat._ +import com.linkedin.norbert.javacompat.cluster.{JavaNode, Node => JNode} +import com.linkedin.norbert.network.partitioned.loadbalancer.{PartitionedLoadBalancerFactory => SPartitionedLoadBalancerFactory} class ScalaLbfToJavaLbf[PartitionedId](scalaLbf: SPartitionedLoadBalancerFactory[PartitionedId]) extends PartitionedLoadBalancerFactory[PartitionedId] { @@ -22,7 +24,7 @@ class ScalaLbfToJavaLbf[PartitionedId](scalaLbf: SPartitionedLoadBalancerFactory def nodesForOneReplica(id: PartitionedId, capability: java.lang.Long, persistentCapability: java.lang.Long) = { val replica = scalaBalancer.nodesForOneReplica(id, capability, persistentCapability) - val result = new java.util.HashMap[Node, java.util.Set[java.lang.Integer]](replica.size) + val result = new java.util.HashMap[JNode, java.util.Set[java.lang.Integer]](replica.size) replica.foreach { case (node, partitions) => result.put(node, partitions) @@ -48,7 +50,7 @@ class ScalaLbfToJavaLbf[PartitionedId](scalaLbf: SPartitionedLoadBalancerFactory def nodesForPartitionedId(id: PartitionedId, capability: java.lang.Long, persistentCapability: java.lang.Long) = { val set = scalaBalancer.nodesForPartitionedId(id, capability, persistentCapability) - val jSet = new java.util.HashSet[Node]() + val jSet = new java.util.HashSet[JNode]() set.foldLeft(jSet) { case (jSet, node) => {jSet.add(node); jSet} } jSet } @@ -58,7 +60,7 @@ class ScalaLbfToJavaLbf[PartitionedId](scalaLbf: SPartitionedLoadBalancerFactory def nodesForPartitions(id: PartitionedId, partitions: java.util.Set[java.lang.Integer], capability: java.lang.Long) = nodesForPartitions(id, partitions, capability, 0L) def nodesForPartitions(id: PartitionedId, partitions:java.util.Set[java.lang.Integer], capability: java.lang.Long, persistentCapability: java.lang.Long) = { val replica = scalaBalancer.nodesForPartitions(id, partitions, capability, persistentCapability) - val result = new java.util.HashMap[Node, java.util.Set[java.lang.Integer]](replica.size) + val result = new java.util.HashMap[JNode, java.util.Set[java.lang.Integer]](replica.size) replica.foreach { case (node, partitions) => result.put(node, partitions) @@ -71,6 +73,26 @@ class ScalaLbfToJavaLbf[PartitionedId](scalaLbf: SPartitionedLoadBalancerFactory if (capability.longValue == 0L) None else Some(capability.longValue) } + + override def nextNodes(id: PartitionedId): util.LinkedHashSet[JNode] = nextNodes(id, 0L, 0L) + + override def nextNodes(id: PartitionedId, capability: lang.Long): util.LinkedHashSet[JNode] = nextNodes(id, capability, 0L) + + override def nextNodes(id: PartitionedId, capability: lang.Long, persistentCapability: lang.Long): util.LinkedHashSet[JNode] = { + rewrap(scalaBalancer.nextNodes(id, Option(capability), Option(persistentCapability))) + } + + def rewrap(nodes: util.LinkedHashSet[com.linkedin.norbert.cluster.Node]): util.LinkedHashSet[JNode] = { + val result = new util.LinkedHashSet[JNode]() + val it = nodes.iterator() + while(it.hasNext) { + val node:cluster.Node = it.next() + result.add(new JavaNode(node.id, node.url, node.available, node.partitionIds, node.capability, node.persistentCapability)) + } + result + + } + } diff --git a/network/src/main/scala/com/linkedin/norbert/network/netty/NettyNetworkClient.scala 
b/network/src/main/scala/com/linkedin/norbert/network/netty/NettyNetworkClient.scala index db9db508..f574c741 100644 --- a/network/src/main/scala/com/linkedin/norbert/network/netty/NettyNetworkClient.scala +++ b/network/src/main/scala/com/linkedin/norbert/network/netty/NettyNetworkClient.scala @@ -24,7 +24,7 @@ import org.jboss.netty.handler.codec.frame.{LengthFieldBasedFrameDecoder, Length import org.jboss.netty.handler.codec.protobuf.{ProtobufDecoder, ProtobufEncoder} import java.util.concurrent.Executors import partitioned.loadbalancer.{PartitionedLoadBalancerFactoryComponent, PartitionedLoadBalancerFactory} -import partitioned.PartitionedNetworkClient +import com.linkedin.norbert.network.partitioned.{PartitionedNetworkClientFailOver, PartitionedNetworkClient} import client.loadbalancer.{LoadBalancerFactoryComponent, LoadBalancerFactory} import com.linkedin.norbert.cluster.{Node, ClusterClient, ClusterClientComponent} import protos.NorbertProtos @@ -157,3 +157,8 @@ class NettyPartitionedNetworkClient[PartitionedId](clientConfig: NetworkClientCo with PartitionedNetworkClient[PartitionedId] with PartitionedLoadBalancerFactoryComponent[PartitionedId] { setConfig(clientConfig) } + +class NettyPartitionedFailOverNetworkClient[PartitionedId](clientConfig: NetworkClientConfig, val loadBalancerFactory: PartitionedLoadBalancerFactory[PartitionedId]) extends BaseNettyNetworkClient(clientConfig) +with PartitionedNetworkClientFailOver[PartitionedId] with PartitionedLoadBalancerFactoryComponent[PartitionedId] { + setConfig(clientConfig) +} diff --git a/network/src/main/scala/com/linkedin/norbert/network/partitioned/PartitionedNetworkClient.scala b/network/src/main/scala/com/linkedin/norbert/network/partitioned/PartitionedNetworkClient.scala index 9dad0c7a..d45eb87c 100644 --- a/network/src/main/scala/com/linkedin/norbert/network/partitioned/PartitionedNetworkClient.scala +++ b/network/src/main/scala/com/linkedin/norbert/network/partitioned/PartitionedNetworkClient.scala @@ -17,15 +17,19 @@ package com.linkedin.norbert package network package partitioned -import java.util.concurrent.Future -import common._ -import loadbalancer.{PartitionedLoadBalancer, PartitionedLoadBalancerFactoryComponent, PartitionedLoadBalancerFactory} -import server.{MessageExecutorComponent, NetworkServer} -import netty.NettyPartitionedNetworkClient -import client.NetworkClientConfig -import cluster.{Node, ClusterDisconnectedException, InvalidClusterException, ClusterClientComponent} +import java.net.ConnectException +import java.util.concurrent.{Future, TimeUnit} + +import com.linkedin.norbert.cluster.{ClusterClientComponent, ClusterDisconnectedException, InvalidClusterException, Node} +import com.linkedin.norbert.network.NoNodesAvailableException +import com.linkedin.norbert.network.client.NetworkClientConfig +import com.linkedin.norbert.network.common._ +import com.linkedin.norbert.network.netty.NettyPartitionedNetworkClient +import com.linkedin.norbert.network.partitioned.loadbalancer.{PartitionedLoadBalancer, PartitionedLoadBalancerFactory, PartitionedLoadBalancerFactoryComponent} +import com.linkedin.norbert.network.server.{MessageExecutorComponent, NetworkServer} + +import scala.beans.BeanProperty import scala.util.Random -import java.util object RoutingConfigs { val defaultRoutingConfigs = new RoutingConfigs(false, false) @@ -72,7 +76,7 @@ trait PartitionedNetworkClient[PartitionedId] extends BaseNetworkClient { retryStrategy = config.retryStrategy } - @volatile private var loadBalancer: 
Option[Either[InvalidClusterException, PartitionedLoadBalancer[PartitionedId]]] = None + @volatile protected var loadBalancer: Option[Either[InvalidClusterException, PartitionedLoadBalancer[PartitionedId]]] = None def sendRequest[RequestMsg, ResponseMsg](id: PartitionedId, request: RequestMsg, callback: Either[Throwable, ResponseMsg] => Unit) (implicit is: InputSerializer[RequestMsg, ResponseMsg], os: OutputSerializer[RequestMsg, ResponseMsg]): Unit = @@ -768,3 +772,46 @@ trait PartitionedNetworkClient[PartitionedId] extends BaseNetworkClient { } } + +trait PartitionedNetworkClientFailOver[PartitionedId] extends PartitionedNetworkClient[PartitionedId] { + + this: ClusterClientComponent with ClusterIoClientComponent with PartitionedLoadBalancerFactoryComponent[PartitionedId] => + + override def sendRequest[RequestMsg, ResponseMsg](id: PartitionedId, request: RequestMsg, callback: Either[Throwable, ResponseMsg] => Unit, capability: Option[Long], persistentCapability: Option[Long]) + (implicit is: InputSerializer[RequestMsg, ResponseMsg], os: OutputSerializer[RequestMsg, ResponseMsg]): Unit = doIfConnected { + if (id == null || request == null) throw new NullPointerException + + val nodes = loadBalancer.getOrElse(throw new ClusterDisconnectedException).fold(ex => throw ex, + lb => lb.nextNodes(id, capability, persistentCapability)) + + if (nodes.isEmpty) { + throw new NoNodesAvailableException("Unable to satisfy request, no node available for id %s".format(id)) + } else { + val nodeIterator = nodes.iterator() + val firstNode = nodeIterator.next() + var failOverNode :Option[Node] = None + if (nodeIterator.hasNext) { + failOverNode = Option[Node](nodeIterator.next()) + } + val failOverCallback = (e:Either[Throwable, ResponseMsg]) => { + if (failOverNode.isDefined) { + e match { + case Right(ex:ConnectException) => failOverRequestToNextNode(failOverNode.get, id, request, callback, capability, persistentCapability); + case Right(ex:Throwable) => callback.apply(e); + case Left(r:ResponseMsg) => callback.apply(e); + } + } else { + callback.apply(e) + } + () // force unit return type + } + doSendRequest(PartitionedRequest(request, firstNode, Set(id), (node: Node, ids: Set[PartitionedId]) => request, is, os, Option(failOverCallback))) + } + } + + def failOverRequestToNextNode[RequestMsg, ResponseMsg]( node:Node, id: PartitionedId, request: RequestMsg, callback: Either[Throwable, ResponseMsg] => Unit, capability: Option[Long], persistentCapability: Option[Long]) + (implicit is: InputSerializer[RequestMsg, ResponseMsg], os: OutputSerializer[RequestMsg, ResponseMsg]): Unit = doIfConnected { + doSendRequest(PartitionedRequest(request, node, Set(id), (node: Node, ids: Set[PartitionedId]) => request, is, os, Option(callback))) + } + +} \ No newline at end of file diff --git a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DefaultClusteredLoadBalancerFactory.scala b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DefaultClusteredLoadBalancerFactory.scala index e0588361..3ee777de 100644 --- a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DefaultClusteredLoadBalancerFactory.scala +++ b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DefaultClusteredLoadBalancerFactory.scala @@ -18,10 +18,13 @@ package network package partitioned package loadbalancer -import logging.Logging -import cluster.{Node, InvalidClusterException} -import common.Endpoint +import java.util import 
java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger} + +import com.linkedin.norbert.cluster.{InvalidClusterException, Node} +import com.linkedin.norbert.logging.Logging +import com.linkedin.norbert.network.common.Endpoint + import scala.util.Random import scala.util.control.Breaks._ @@ -275,6 +278,15 @@ abstract class DefaultClusteredLoadBalancerFactory[PartitionedId](numPartitions: } } } + + /** + * Returns the consistent ordered set of nodes to which messages should be routed; the order is based on the PartitionId provided. + * + * @param id the id based on which the order of the nodes will be determined + * @return an ordered set of nodes + */ + override def nextNodes(id: PartitionedId, capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[Node] = + nodesForPartition(partitionForId(id), capability, persistentCapability) } /** diff --git a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DefaultLoadBalancerHelper.scala b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DefaultLoadBalancerHelper.scala index a304b1b4..fa4e1274 100644 --- a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DefaultLoadBalancerHelper.scala +++ b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DefaultLoadBalancerHelper.scala @@ -18,12 +18,13 @@ package network package partitioned package loadbalancer -import cluster.{InvalidClusterException, Node} -import common.Endpoint +import java.util import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger} -import annotation.tailrec -import client.loadbalancer.LoadBalancerHelpers -import logging.Logging + +import com.linkedin.norbert.cluster.{InvalidClusterException, Node} +import com.linkedin.norbert.logging.Logging +import com.linkedin.norbert.network.client.loadbalancer.LoadBalancerHelpers +import com.linkedin.norbert.network.common.Endpoint /** * A mixin trait that provides functionality to help implement a hash based Router. 
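// Editorial sketch of the ordering contract behind the nodesForPartition
// helper added in the hunks below: starting from a snapshot of the
// per-partition round-robin counter, every endpoint is probed once (the
// loop makes es + 1 probes, and the LinkedHashSet deduplicates the
// wrapped-around starting endpoint), keeping endpoints in probe order.
// The String endpoints here are illustrative stand-ins for the real
// Endpoint type, with the capability checks omitted.
object RoundRobinOrderSketch {
  def orderedFrom(endpoints: IndexedSeq[String], counterSnapshot: Int): java.util.LinkedHashSet[String] = {
    val result = new java.util.LinkedHashSet[String]()
    val es = endpoints.size
    var i = counterSnapshot
    var loopCount = 0
    do {
      result.add(endpoints(i % es)) // the real helper also checks canServeRequests / isCapableOf
      i += 1
      loopCount += 1
    } while (loopCount <= es)
    result
  }

  def main(args: Array[String]): Unit = {
    // Three endpoints with the counter at 1: failover order is B, C, A.
    println(orderedFrom(Vector("A", "B", "C"), 1)) // prints [B, C, A]
  }
}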
@@ -83,7 +84,6 @@ trait DefaultLoadBalancerHelper extends LoadBalancerHelpers with Logging { case None => return None case Some((endpoints, counter, states)) => - import math._ val es = endpoints.size counter.compareAndSet(java.lang.Integer.MAX_VALUE, 0) val idx = counter.getAndIncrement @@ -106,6 +106,32 @@ trait DefaultLoadBalancerHelper extends LoadBalancerHelpers with Logging { } } + protected def nodesForPartition(partitionId: Int, capability: Option[Long] = None, persistentCapability: Option[Long] = None): util.LinkedHashSet[Node] = { + partitionToNodeMap.get(partitionId) match { + case None => + return new util.LinkedHashSet[Node] + case Some((endpoints, counter, states)) => + val es = endpoints.size + counter.compareAndSet(java.lang.Integer.MAX_VALUE, 0) + val idx = counter.getAndIncrement + var i = idx + var loopCount = 0 + val result = new util.LinkedHashSet[Node] + do { + val endpoint = endpoints(i % es) + if(endpoint.canServeRequests && endpoint.node.isCapableOf(capability, persistentCapability)) { + result.add(endpoint.node) + } + + i = i + 1 + if (i < 0) i = 0 + loopCount = loopCount + 1 + } while (loopCount <= es) + + result + } + } + def compensateCounter(idx: Int, count:Int, counter:AtomicInteger) { if (idx + 1 + count <= 0) { // Integer overflow diff --git a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DefaultPartitionedLoadBalancerFactory.scala b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DefaultPartitionedLoadBalancerFactory.scala index 2a2c4df9..39b91df1 100644 --- a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DefaultPartitionedLoadBalancerFactory.scala +++ b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DefaultPartitionedLoadBalancerFactory.scala @@ -18,11 +18,13 @@ package network package partitioned package loadbalancer -import logging.Logging -import cluster.{Node, InvalidClusterException} -import common.Endpoint +import java.util import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger} -import scala.util.Random + +import com.linkedin.norbert.cluster.{InvalidClusterException, Node} +import com.linkedin.norbert.logging.Logging +import com.linkedin.norbert.network.common.Endpoint + import scala.util.control.Breaks._ /** @@ -55,7 +57,7 @@ abstract class DefaultPartitionedLoadBalancerFactory[PartitionedId](numPartition partitionToNodeMap.keys.foldLeft(Map.empty[Node, Set[Int]]) { (map, partition) => val nodeOption = nodeForPartition(partition, capability, persistentCapability) if(nodeOption.isDefined) { - val n = nodeOption.get + val n = nodeOption.iterator.next() map + (n -> (map.getOrElse(n, Set.empty[Int]) + partition)) } else if(serveRequestsIfPartitionMissing) { log.warn("Partition %s is unavailable, attempting to continue serving requests to other partitions.".format(partition)) @@ -96,7 +98,6 @@ abstract class DefaultPartitionedLoadBalancerFactory[PartitionedId](numPartition case None => break case Some((endpoints, counter, states)) => - import math._ val es = endpoints.size counter.compareAndSet(java.lang.Integer.MAX_VALUE, 0) val idx = counter.getAndIncrement % es @@ -152,6 +153,15 @@ abstract class DefaultPartitionedLoadBalancerFactory[PartitionedId](numPartition } } } + + /** + * Returns the consistent ordered set of nodes to which messages should be routed; the order is based on the PartitionId provided. 
+ * + * @param id the id based on which the order of the nodes will be determined + * @return an ordered set of nodes + */ + override def nextNodes(id: PartitionedId, capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[Node] = + nodesForPartition(partitionForId(id), capability, persistentCapability) } /** diff --git a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedConsistentHashedLoadBalancerFactory.scala b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedConsistentHashedLoadBalancerFactory.scala index e6cb66e3..e3208202 100644 --- a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedConsistentHashedLoadBalancerFactory.scala +++ b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedConsistentHashedLoadBalancerFactory.scala @@ -1,12 +1,12 @@ package com.linkedin.norbert.network.partitioned.loadbalancer -import com.linkedin.norbert.network.common.Endpoint +import java.util import java.util.TreeMap -import com.linkedin.norbert.cluster.{Node, InvalidClusterException} -import com.linkedin.norbert.logging.Logging -import com.linkedin.norbert.network.client.loadbalancer.LoadBalancerHelpers import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger} +import com.linkedin.norbert.cluster.{InvalidClusterException, Node} +import com.linkedin.norbert.network.common.Endpoint + /* * Copyright 2009-2010 LinkedIn, Inc * @@ -141,11 +141,36 @@ class PartitionedConsistentHashedLoadBalancer[PartitionedId](numPartitions: Int, throw new InvalidClusterException("Partition %s is unavailable, cannot serve requests.".format(partition)) } } - + + override def nextNodes(id: PartitionedId, capability: Option[Long] = None, persistentCapability: Option[Long] = None): util.LinkedHashSet[Node] = { + val hash = hashFn(id) + val partitionId = hash.abs % numPartitions + // FIXME: wheels.get(partitionId) here may fail, we should use PartitionUtil.searchWheel to search the outer wheel as well + // this can happen if numPartitions is a lie, or if we have non-contiguous partitions (0, 2, 4), form + val innerMapOpt = wheels.get(partitionId) + val result = new util.LinkedHashSet[Node]() + if (innerMapOpt.isDefined) { + val innerMap = innerMapOpt.get + val startEntry = PartitionUtil.wheelEntry(innerMap, hash) + if (startEntry != null) { + result.add(startEntry.getValue.node) + var nextEntry = PartitionUtil.rotateWheel(innerMap, startEntry.getKey) + while (nextEntry != startEntry) { + result.add(nextEntry.getValue.node) + nextEntry = PartitionUtil.rotateWheel(innerMap, nextEntry.getKey) + } + result + } + } + log.warn("Failed to find mapping for %s") + result + } def nextNode(id: PartitionedId, capability: Option[Long] = None, persistentCapability: Option[Long] = None): Option[Node] = { val hash = hashFn(id) val partitionId = hash.abs % numPartitions + + // FIXME: wheels.get(partitionId) here may fail, we should use PartitionUtil.searchWheel to search the outer wheel as well wheels.get(partitionId).flatMap { wheel => PartitionUtil.searchWheel(wheel, hash, (e: Endpoint) => e.canServeRequests && e.node.isCapableOf(capability, persistentCapability) ) }.map(_.node) diff --git a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedLoadBalancerFactory.scala b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedLoadBalancerFactory.scala index ebcf1e1a..f1ed11ec 100644 --- 
a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedLoadBalancerFactory.scala +++ b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedLoadBalancerFactory.scala @@ -19,15 +19,27 @@ package partitioned package loadbalancer +import java.util + +import com.linkedin.norbert.cluster.{InvalidClusterException, Node} +import com.linkedin.norbert.network.common.Endpoint + import _root_.scala.Predef._ -import cluster.{InvalidClusterException, Node} -import common.Endpoint /** * A PartitionedLoadBalancer handles calculating the next Node a message should be routed to * based on a PartitionedId. */ trait PartitionedLoadBalancer[PartitionedId] { + + /** + * Returns the consistent ordered set of nodes to which messages should be routed; the order is based on the PartitionId provided. + * + * @param id the id based on which the order of the nodes will be determined + * @return an ordered set of nodes + */ + def nextNodes(id: PartitionedId, capability: Option[Long] = None, persistentCapability: Option[Long] = None): util.LinkedHashSet[Node] + /** * Returns the next Node a message should be routed to based on the PartitionId provided. * diff --git a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/SimpleConsistentHashedLoadBalancerFactory.scala b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/SimpleConsistentHashedLoadBalancerFactory.scala index 87c1fd9f..6dbafed2 100644 --- a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/SimpleConsistentHashedLoadBalancerFactory.scala +++ b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/SimpleConsistentHashedLoadBalancerFactory.scala @@ -19,9 +19,11 @@ package network package partitioned package loadbalancer -import common.Endpoint +import java.util import java.util.TreeMap -import cluster.{Node, InvalidClusterException} + +import com.linkedin.norbert.cluster.{InvalidClusterException, Node} +import com.linkedin.norbert.network.common.Endpoint /** * This load balancer is appropriate when any server could handle the request. In this case, the partitions don't really mean anything. 
They simply control a percentage of the requests @@ -63,4 +65,6 @@ class SimpleConsistentHashedLoadBalancer[PartitionedId](wheel: TreeMap[Int, Endp def nextNode(id: PartitionedId, capability: Option[Long], persistentCapability: Option[Long]): Option[Node] = { PartitionUtil.searchWheel(wheel, hashFn(id), (e: Endpoint) => e.canServeRequests && e.node.isCapableOf(capability, persistentCapability)).map(_.node) } + + override def nextNodes(id: PartitionedId, capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[Node] = throw new UnsupportedOperationException } diff --git a/network/src/test/scala/com/linkedin/norbert/network/partitioned/PartitionedNetworkClientSpec.scala b/network/src/test/scala/com/linkedin/norbert/network/partitioned/PartitionedNetworkClientSpec.scala index fc0bcf30..3fec5633 100644 --- a/network/src/test/scala/com/linkedin/norbert/network/partitioned/PartitionedNetworkClientSpec.scala +++ b/network/src/test/scala/com/linkedin/norbert/network/partitioned/PartitionedNetworkClientSpec.scala @@ -17,12 +17,12 @@ package com.linkedin.norbert package network package partitioned -import common.{Endpoint, ClusterIoClientComponent, BaseNetworkClientSpecification} -import loadbalancer._ +import java.util import java.util.concurrent.ExecutionException -import cluster.{Node, InvalidClusterException, ClusterDisconnectedException, ClusterClientComponent} -import scala.Left -import scala.Some + +import com.linkedin.norbert.cluster.{ClusterClientComponent, ClusterDisconnectedException, InvalidClusterException, Node} +import com.linkedin.norbert.network.common.{BaseNetworkClientSpecification, ClusterIoClientComponent} +import com.linkedin.norbert.network.partitioned.loadbalancer._ class PartitionedNetworkClientSpec extends BaseNetworkClientSpecification { val networkClient = new PartitionedNetworkClient[Int] with ClusterClientComponent with ClusterIoClientComponent @@ -33,6 +33,7 @@ class PartitionedNetworkClientSpec extends BaseNetworkClientSpecification { def nodesForOneReplica(id: Int, capability: Option[Long], permanentCapability: Option[Long]) = lb.nodesForOneReplica(id, capability, permanentCapability) def nodesForPartitionedId(id: Int, capability: Option[Long], permanentCapability: Option[Long]) = lb.nodesForPartitionedId(id, capability, permanentCapability) def nodesForPartitions(id: Int, partitions: Set[Int], capability: Option[Long], permanentCapability: Option[Long]) = lb.nodesForPartitions(id, partitions, capability, permanentCapability) + override def nextNodes(id: Int, capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[Node] = lb.nextNodes(id, capability, persistentCapability) } val loadBalancerFactory = mock[PartitionedLoadBalancerFactory[Int]] val clusterIoClient = mock[ClusterIoClient] @@ -707,6 +708,7 @@ class PartitionedNetworkClientSpec extends BaseNetworkClientSpecification { def nodesForOneReplica(id: Int, c: Option[Long] = None, pc: Option[Long] = None) = null def nodesForPartitionedId(id:Int, c: Option[Long] = None, pc: Option[Long] = None) = null def nodesForPartitions(id: Int, partitions: Set[Int], c: Option[Long] = None, pc: Option[Long] = None) = null + def nextNodes(id: Int, capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[Node] = null } val loadBalancerFactory = mock[PartitionedLoadBalancerFactory[Int]] val clusterIoClient = new ClusterIoClient { @@ -753,6 +755,7 @@ class PartitionedNetworkClientSpec extends BaseNetworkClientSpecification { def nodesForOneReplica(id: Int, 
c: Option[Long] = None, pc: Option[Long] = None) = null
       def nodesForPartitionedId(id:Int, c: Option[Long] = None, pc: Option[Long] = None) = null
       def nodesForPartitions(id: Int, partitions: Set[Int], c: Option[Long] = None, pc: Option[Long] = None)= null
+      def nextNodes(id: Int, capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[Node] = null
     }
     val loadBalancerFactory = mock[PartitionedLoadBalancerFactory[Int]]
     val clusterIoClient = new ClusterIoClient {
@@ -801,6 +804,8 @@ class PartitionedNetworkClientSpec extends BaseNetworkClientSpecification {
       def nodesForOneReplica(id: Int, c: Option[Long] = None, pc: Option[Long] = None) = null
       def nodesForPartitionedId(id:Int, c: Option[Long] = None, pc: Option[Long] = None) = null
       def nodesForPartitions(id: Int, partitions: Set[Int], c: Option[Long] = None, pc: Option[Long] = None) = null
+      def nextNodes(id: Int, capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[Node] = null
+    }
     val loadBalancerFactory = mock[PartitionedLoadBalancerFactory[Int]]
     val clusterIoClient = new ClusterIoClient {
@@ -850,6 +855,8 @@ class PartitionedNetworkClientSpec extends BaseNetworkClientSpecification {
       def nodesForOneReplica(id: Int, c: Option[Long] = None, pc: Option[Long] = None) = null
       def nodesForPartitionedId(id:Int, c: Option[Long] = None, pc: Option[Long] = None) = null
       def nodesForPartitions(id: Int, partitions: Set[Int], c: Option[Long] = None, pc: Option[Long] = None) = null
+      def nextNodes(id: Int, capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[Node] = null
+    }
     val loadBalancerFactory = mock[PartitionedLoadBalancerFactory[Int]]
     val clusterIoClient = new ClusterIoClient {
@@ -885,6 +892,7 @@ class PartitionedNetworkClientSpec extends BaseNetworkClientSpecification {
       def nodesForOneReplica(id: Int, c: Option[Long] = None, pc: Option[Long] = None) = null
       def nodesForPartitionedId(id:Int, c: Option[Long] = None, pc: Option[Long] = None) = null
       def nodesForPartitions(id: Int, partitions: Set[Int], c: Option[Long] = None, pc: Option[Long] = None) = null
+      def nextNodes(id: Int, capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[Node] = null
     }
     val loadBalancerFactory = mock[PartitionedLoadBalancerFactory[Int]]
     val clusterIoClient = new ClusterIoClient {
@@ -919,6 +927,8 @@ class PartitionedNetworkClientSpec extends BaseNetworkClientSpecification {
       def nodesForOneReplica(id: Int, c: Option[Long] = None, pc: Option[Long] = None) = null
       def nodesForPartitionedId(id:Int, c: Option[Long] = None, pc: Option[Long] = None) = null
       def nodesForPartitions(id: Int, partitions: Set[Int], c: Option[Long] = None, pc: Option[Long] = None) = null
+      def nextNodes(id: Int, capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[Node] = null
+    }
     val loadBalancerFactory = mock[PartitionedLoadBalancerFactory[Int]]
     val clusterIoClient = new ClusterIoClient {

diff --git a/network/src/test/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedConsistentHashedLoadBalancerSpec.scala b/network/src/test/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedConsistentHashedLoadBalancerSpec.scala
index df1e6495..996867ec 100644
--- a/network/src/test/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedConsistentHashedLoadBalancerSpec.scala
+++ b/network/src/test/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedConsistentHashedLoadBalancerSpec.scala
@@ -4,6 +4,9 @@
 import org.specs.SpecificationWithJUnit
 import com.linkedin.norbert.network.common.Endpoint
 import com.linkedin.norbert.cluster.{InvalidClusterException, Node}
+import scala.collection.JavaConversions
+import scala.collection.immutable.HashSet
+
 /*
  * Copyright 2009-2010 LinkedIn, Inc
  *
@@ -21,13 +24,16 @@ import com.linkedin.norbert.cluster.{InvalidClusterException, Node}
  */
 class PartitionedConsistentHashedLoadBalancerSpec extends SpecificationWithJUnit {

-  class TestLBF(numPartitions: Int, csr: Boolean = true)
+  class TestLBF(numPartitions: Int, hashFunction:((Int) => Int), numReplicas:Int = 10, csr: Boolean = true)
     extends PartitionedConsistentHashedLoadBalancerFactory[Int](numPartitions,
-      10,
-      (id: Int) => HashFunctions.fnv(BigInt(id).toByteArray),
+      numReplicas,
+      hashFunction,
       (str: String) => str.hashCode(),
-      csr)
-
+      csr) {
+    def this(numPartitions: Int, numReplicas:Int = 10, csr: Boolean = true) =
+      this(numPartitions, (id: Int) => HashFunctions.fnv(BigInt(id).toByteArray), numReplicas, csr)
+  }
+
   class TestEndpoint(val node: Node, var csr: Boolean) extends Endpoint {
     def canServeRequests = csr
@@ -53,7 +59,15 @@ class PartitionedConsistentHashedLoadBalancerSpec extends SpecificationWithJUnit
 //      loadBalancerFactory.partitionForId(EId(1210)) must be_==(0)
 //    }
 //  }
-
+
+  val overlappingAtPartitionZero = Set(
+    Node(0, "localhost:31311", true, Set(0,1)),
+    Node(1, "localhost:31312", true, Set(2)),
+    Node(2, "localhost:31313", true, Set(0,3)),
+    Node(3, "localhost:31314", true, Set(0,4)),
+    Node(4, "localhost:31315", true, Set(4))
+  )
+
   val sampleNodes = Set(
     Node(0, "localhost:31313", true, Set(0, 1), Some(0x1), Some(0)),
     Node(1, "localhost:31313", true, Set(1, 2)),
@@ -79,7 +93,7 @@ class PartitionedConsistentHashedLoadBalancerSpec extends SpecificationWithJUnit
       Node(0, "localhost:31313", true, Set[Int]()),
       Node(1, "localhost:31313", true, Set[Int]()))

-    new TestLBF(2, false).newLoadBalancer(toEndpoints(nodes)) must throwA[InvalidClusterException]
+    new TestLBF(2,10, false).newLoadBalancer(toEndpoints(nodes)) must throwA[InvalidClusterException]
   }

   "throw InvalidClusterException if one partition is unavailable, and the LBF cannot serve requests in that state, " in {
@@ -87,8 +101,8 @@ class PartitionedConsistentHashedLoadBalancerSpec extends SpecificationWithJUnit
       Node(0, "localhost:31313", true, Set(1)),
       Node(1, "localhost:31313", true, Set[Int]()))

-    new TestLBF(2, true).newLoadBalancer(toEndpoints(nodes)) must not (throwA[InvalidClusterException])
-    new TestLBF(2, false).newLoadBalancer(toEndpoints(nodes)) must throwA[InvalidClusterException]
+    new TestLBF(2,10, true).newLoadBalancer(toEndpoints(nodes)) must not (throwA[InvalidClusterException])
+    new TestLBF(2,10, false).newLoadBalancer(toEndpoints(nodes)) must throwA[InvalidClusterException]
   }

   "successfully calculate broadcast nodes" in {
@@ -144,10 +158,20 @@
     // Mark node 4 down
     markUnavailable(endpoints, 4)

-    val lbf = new TestLBF(5, false)
+    val lbf = new TestLBF(5, 10, false)
     var loadBalancer = lbf.newLoadBalancer(endpoints)
     loadBalancer.nodesForOneReplica(0, Some(0), Some(0)) must throwA[InvalidClusterException]
   }
+
+  "return a complete set of nodes within partition 0" in {
+    val nodes = overlappingAtPartitionZero
+    val endpoints = toEndpoints(nodes)
+    val lbf = new TestLBF(5, (i:Int)=>i, 10, true)
+    val loadBalancer = lbf.newLoadBalancer(endpoints)
+    val lbNodes = JavaConversions.asScalaSet(loadBalancer.nextNodes(0, None, None))
+    val lbNodeIds = lbNodes.map((n:Node) => n.id)
+    lbNodeIds must be_==(Set(0, 2, 3))
+  }
 }
}
\ No newline at end of file

From 5ad1548c6c175bc72328ddcb88fc94c96976a6c3 Mon Sep 17 00:00:00 2001
From: Nick Hristov
Date: Mon, 27 Oct 2014 12:32:37 -0700
Subject: [PATCH 09/20] Allow for the scala network client to be configurable
 directly from Java.

---
 .../network/client/NetworkClient.scala | 57 ++++++++++---------
 1 file changed, 30 insertions(+), 27 deletions(-)

diff --git a/network/src/main/scala/com/linkedin/norbert/network/client/NetworkClient.scala b/network/src/main/scala/com/linkedin/norbert/network/client/NetworkClient.scala
index 1ddb54ff..9df967c1 100644
--- a/network/src/main/scala/com/linkedin/norbert/network/client/NetworkClient.scala
+++ b/network/src/main/scala/com/linkedin/norbert/network/client/NetworkClient.scala
@@ -18,29 +18,32 @@ package network
 package client

 import java.util.concurrent.Future
-import loadbalancer.{LoadBalancerFactory, LoadBalancer, LoadBalancerFactoryComponent}
-import server.{MessageExecutorComponent, NetworkServer}
-import cluster._
-import network.common._
-import netty.NettyNetworkClient
+
+import com.linkedin.norbert.cluster._
+import com.linkedin.norbert.network.client.loadbalancer.{LoadBalancer, LoadBalancerFactory, LoadBalancerFactoryComponent}
+import com.linkedin.norbert.network.common._
+import com.linkedin.norbert.network.netty.NettyNetworkClient
+import com.linkedin.norbert.network.server.{MessageExecutorComponent, NetworkServer}
+
+import scala.beans.BeanProperty

 object NetworkClientConfig {
   var defaultIteratorTimeout = NetworkDefaults.DEFAULT_ITERATOR_TIMEOUT;
 }

 class NetworkClientConfig {
-  var clusterClient: ClusterClient = _
-  var clientName: String = _
-  var serviceName: String = _
-  var zooKeeperConnectString: String = _
-  var zooKeeperSessionTimeoutMillis = ClusterDefaults.ZOOKEEPER_SESSION_TIMEOUT_MILLIS
+  @BeanProperty var clusterClient: ClusterClient = _
+  @BeanProperty var clientName: String = _
+  @BeanProperty var serviceName: String = _
+  @BeanProperty var zooKeeperConnectString: String = _
+  @BeanProperty var zooKeeperSessionTimeoutMillis = ClusterDefaults.ZOOKEEPER_SESSION_TIMEOUT_MILLIS

-  var connectTimeoutMillis = NetworkDefaults.CONNECT_TIMEOUT_MILLIS
-  var writeTimeoutMillis = NetworkDefaults.WRITE_TIMEOUT_MILLIS
-  var maxConnectionsPerNode = NetworkDefaults.MAX_CONNECTIONS_PER_NODE
+  @BeanProperty var connectTimeoutMillis = NetworkDefaults.CONNECT_TIMEOUT_MILLIS
+  @BeanProperty var writeTimeoutMillis = NetworkDefaults.WRITE_TIMEOUT_MILLIS
+  @BeanProperty var maxConnectionsPerNode = NetworkDefaults.MAX_CONNECTIONS_PER_NODE

-  var staleRequestTimeoutMins = NetworkDefaults.STALE_REQUEST_TIMEOUT_MINS
-  var staleRequestCleanupFrequenceMins = NetworkDefaults.STALE_REQUEST_CLEANUP_FREQUENCY_MINS
+  @BeanProperty var staleRequestTimeoutMins = NetworkDefaults.STALE_REQUEST_TIMEOUT_MINS
+  @BeanProperty var staleRequestCleanupFrequenceMins = NetworkDefaults.STALE_REQUEST_CLEANUP_FREQUENCY_MINS

   /**
    * Represents how long a channel stays alive. There are some specifics:
@@ -48,22 +51,22 @@ class NetworkClientConfig {
    * closeChannelTimeMillis == 0: Immediately close the channel
    * closeChannelTimeMillis > 0: Close the channel after closeChannelTimeMillis
    */
-  var closeChannelTimeMillis = NetworkDefaults.CLOSE_CHANNEL_TIMEOUT_MILLIS
+  @BeanProperty var closeChannelTimeMillis = NetworkDefaults.CLOSE_CHANNEL_TIMEOUT_MILLIS

-  var requestStatisticsWindow = NetworkDefaults.REQUEST_STATISTICS_WINDOW
+  @BeanProperty var requestStatisticsWindow = NetworkDefaults.REQUEST_STATISTICS_WINDOW

-  var outlierMuliplier = NetworkDefaults.OUTLIER_MULTIPLIER
-  var outlierConstant = NetworkDefaults.OUTLIER_CONSTANT
+  @BeanProperty var outlierMuliplier = NetworkDefaults.OUTLIER_MULTIPLIER
+  @BeanProperty var outlierConstant = NetworkDefaults.OUTLIER_CONSTANT

-  var responseHandlerCorePoolSize = NetworkDefaults.RESPONSE_THREAD_CORE_POOL_SIZE
-  var responseHandlerMaxPoolSize = NetworkDefaults.RESPONSE_THREAD_MAX_POOL_SIZE
-  var responseHandlerKeepAliveTime = NetworkDefaults.RESPONSE_THREAD_KEEP_ALIVE_TIME_SECS
-  var responseHandlerMaxWaitingQueueSize = NetworkDefaults.RESPONSE_THREAD_POOL_QUEUE_SIZE
+  @BeanProperty var responseHandlerCorePoolSize = NetworkDefaults.RESPONSE_THREAD_CORE_POOL_SIZE
+  @BeanProperty var responseHandlerMaxPoolSize = NetworkDefaults.RESPONSE_THREAD_MAX_POOL_SIZE
+  @BeanProperty var responseHandlerKeepAliveTime = NetworkDefaults.RESPONSE_THREAD_KEEP_ALIVE_TIME_SECS
+  @BeanProperty var responseHandlerMaxWaitingQueueSize = NetworkDefaults.RESPONSE_THREAD_POOL_QUEUE_SIZE

-  var avoidByteStringCopy = NetworkDefaults.AVOID_BYTESTRING_COPY
-  var darkCanaryServiceName: Option[String] = None
-  var retryStrategy:Option[RetryStrategy] = None
-  var duplicatesOk:Boolean = false
+  @BeanProperty var avoidByteStringCopy = NetworkDefaults.AVOID_BYTESTRING_COPY
+  @BeanProperty var darkCanaryServiceName: Option[String] = None
+  @BeanProperty var retryStrategy:Option[RetryStrategy] = None
+  @BeanProperty var duplicatesOk:Boolean = false
 }

 object NetworkClient {

From 1c374effa4ba8f88c9cc521b2d8103b745f40557 Mon Sep 17 00:00:00 2001
From: Nick Hristov
Date: Mon, 27 Oct 2014 12:35:17 -0700
Subject: [PATCH 10/20] Fail-over bugfixes.
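
In the fail-over callback the Either[Throwable, ResponseMsg] carries the
failure on the Left and the response on the Right, so the old match on
Right(ex:ConnectException) could never fire and fail-over was dead code.
A minimal sketch of the corrected dispatch (dispatch and failOver are
illustrative names, not the exact members of PartitionedNetworkClient.scala):

    import java.net.ConnectException

    // Only a connection-level failure triggers fail-over; any other error,
    // and every successful response, goes straight back to the caller.
    def dispatch[ResponseMsg](e: Either[Throwable, ResponseMsg],
                              failOver: () => Unit,
                              callback: Either[Throwable, ResponseMsg] => Unit): Unit =
      e match {
        case Left(_: ConnectException) => failOver()
        case other                     => callback(other)
      }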
---
 .../network/partitioned/PartitionedNetworkClient.scala   | 9 +++++----
 .../PartitionedConsistentHashedLoadBalancerFactory.scala | 4 ++--
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/network/src/main/scala/com/linkedin/norbert/network/partitioned/PartitionedNetworkClient.scala b/network/src/main/scala/com/linkedin/norbert/network/partitioned/PartitionedNetworkClient.scala
index d45eb87c..e9ca097f 100644
--- a/network/src/main/scala/com/linkedin/norbert/network/partitioned/PartitionedNetworkClient.scala
+++ b/network/src/main/scala/com/linkedin/norbert/network/partitioned/PartitionedNetworkClient.scala
@@ -796,9 +796,9 @@ trait PartitionedNetworkClientFailOver[PartitionedId] extends PartitionedNetwork
       val failOverCallback = (e:Either[Throwable, ResponseMsg]) => {
         if (failOverNode.isDefined) {
           e match {
-            case Right(ex:ConnectException) => failOverRequestToNextNode(failOverNode.get, id, request, callback, capability, persistentCapability);
-            case Right(ex:Throwable) => callback.apply(e);
-            case Left(r:ResponseMsg) => callback.apply(e);
+            case Left(ex:ConnectException) => failOverRequestToNextNode(firstNode, failOverNode.get, id, request, callback, capability, persistentCapability);
+            case Left(ex:Throwable) => callback.apply(e);
+            case Right(r:ResponseMsg) => callback.apply(e);
           }
         } else {
           callback.apply(e)
@@ -809,8 +809,9 @@ trait PartitionedNetworkClientFailOver[PartitionedId] extends PartitionedNetwork
     }
   }

-  def failOverRequestToNextNode[RequestMsg, ResponseMsg]( node:Node, id: PartitionedId, request: RequestMsg, callback: Either[Throwable, ResponseMsg] => Unit, capability: Option[Long], persistentCapability: Option[Long])
+  def failOverRequestToNextNode[RequestMsg, ResponseMsg]( failureNode:Node, node:Node, id: PartitionedId, request: RequestMsg, callback: Either[Throwable, ResponseMsg] => Unit, capability: Option[Long], persistentCapability: Option[Long])
   (implicit is: InputSerializer[RequestMsg, ResponseMsg], os: OutputSerializer[RequestMsg, ResponseMsg]): Unit = doIfConnected {
+    log.warn("request to node %d failed, re-routing to node %d".format(failureNode.id, node.id))
     doSendRequest(PartitionedRequest(request, node, Set(id), (node: Node, ids: Set[PartitionedId]) => request, is, os, Option(callback)))
   }

diff --git a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedConsistentHashedLoadBalancerFactory.scala b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedConsistentHashedLoadBalancerFactory.scala
index e3208202..ffd7be18 100644
--- a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedConsistentHashedLoadBalancerFactory.scala
+++ b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedConsistentHashedLoadBalancerFactory.scala
@@ -159,10 +159,10 @@ class PartitionedConsistentHashedLoadBalancer[PartitionedId](numPartitions: Int,
           result.add(nextEntry.getValue.node)
           nextEntry = PartitionUtil.rotateWheel(innerMap, nextEntry.getKey)
         }
-        result
+        return result
       }
     }
-    log.warn("Failed to find mapping for %s")
+    log.warn("Failed to find mapping for %s, expect routing failures".format(id))
     result
   }

From 7c46591af3c607f5c99fa1a621941c46be54b595 Mon Sep 17 00:00:00 2001
From: Nick Hristov
Date: Tue, 28 Oct 2014 12:13:04 -0700
Subject: [PATCH 11/20] Java compatibility for NettyNetworkServer configuration

---
 .../network/netty/NettyNetworkServer.scala | 28 ++++++++++---------
 1 file changed, 15 insertions(+), 13 deletions(-)

diff --git a/network/src/main/scala/com/linkedin/norbert/network/netty/NettyNetworkServer.scala b/network/src/main/scala/com/linkedin/norbert/network/netty/NettyNetworkServer.scala
index 0dce3db9..1e58855e 100644
--- a/network/src/main/scala/com/linkedin/norbert/network/netty/NettyNetworkServer.scala
+++ b/network/src/main/scala/com/linkedin/norbert/network/netty/NettyNetworkServer.scala
@@ -30,25 +30,27 @@ import protos.NorbertProtos
 import norbertutils.NamedPoolThreadFactory
 import org.jboss.netty.channel.{Channels, ChannelPipelineFactory}

+import scala.beans.BeanProperty
+
 class NetworkServerConfig {
-  var clusterClient: ClusterClient = _
-  var serviceName: String = _
-  var zooKeeperConnectString: String = _
-  var zooKeeperSessionTimeoutMillis = 30000
+  @BeanProperty var clusterClient: ClusterClient = _
+  @BeanProperty var serviceName: String = _
+  @BeanProperty var zooKeeperConnectString: String = _
+  @BeanProperty var zooKeeperSessionTimeoutMillis = 30000

-  var requestTimeoutMillis = NetworkDefaults.REQUEST_TIMEOUT_MILLIS
-  var responseGenerationTimeoutMillis = -1//turned off by default
+  @BeanProperty var requestTimeoutMillis = NetworkDefaults.REQUEST_TIMEOUT_MILLIS
+  @BeanProperty var responseGenerationTimeoutMillis = -1//turned off by default

-  var requestThreadCorePoolSize = NetworkDefaults.REQUEST_THREAD_CORE_POOL_SIZE
-  var requestThreadMaxPoolSize = NetworkDefaults.REQUEST_THREAD_MAX_POOL_SIZE
-  var requestThreadKeepAliveTimeSecs = NetworkDefaults.REQUEST_THREAD_KEEP_ALIVE_TIME_SECS
+  @BeanProperty var requestThreadCorePoolSize = NetworkDefaults.REQUEST_THREAD_CORE_POOL_SIZE
+  @BeanProperty var requestThreadMaxPoolSize = NetworkDefaults.REQUEST_THREAD_MAX_POOL_SIZE
+  @BeanProperty var requestThreadKeepAliveTimeSecs = NetworkDefaults.REQUEST_THREAD_KEEP_ALIVE_TIME_SECS

-  var threadPoolQueueSize = NetworkDefaults.REQUEST_THREAD_POOL_QUEUE_SIZE
+  @BeanProperty var threadPoolQueueSize = NetworkDefaults.REQUEST_THREAD_POOL_QUEUE_SIZE

-  var requestStatisticsWindow = NetworkDefaults.REQUEST_STATISTICS_WINDOW
-  var avoidByteStringCopy = NetworkDefaults.AVOID_BYTESTRING_COPY
+  @BeanProperty var requestStatisticsWindow = NetworkDefaults.REQUEST_STATISTICS_WINDOW
+  @BeanProperty var avoidByteStringCopy = NetworkDefaults.AVOID_BYTESTRING_COPY

-  var shutdownPauseMultiplier = NetworkDefaults.SHUTDOWN_PAUSE_MULTIPLIER
+  @BeanProperty var shutdownPauseMultiplier = NetworkDefaults.SHUTDOWN_PAUSE_MULTIPLIER
 }

 class NettyNetworkServer(serverConfig: NetworkServerConfig) extends NetworkServer with ClusterClientComponent with NettyClusterIoServerComponent

From b2141d3bf2193dacda379208a39534020293052e Mon Sep 17 00:00:00 2001
From: Nick Hristov
Date: Tue, 28 Oct 2014 12:15:47 -0700
Subject: [PATCH 12/20] Bugfix to deal with non-monotonically increasing
 partition identifiers in the consistent hash load balancer.
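
With non-contiguous partition ids (say 0, 2 and 4), the modulo of the hash can
land on an id that owns no wheel, so the old exact Map lookup came back empty.
The fix keeps the wheels in a sorted java.util.TreeMap and resolves the
computed id with a ceiling lookup that wraps around. A small sketch of the
semantics assumed of PartitionUtil.wheelEntry (the helper itself is not part
of this patch):

    import java.util.TreeMap

    // Ceiling lookup with wrap-around: any key resolves to some entry
    // as long as the wheel is non-empty.
    def wheelEntry[V](wheel: TreeMap[Int, V], key: Int): java.util.Map.Entry[Int, V] = {
      val e = wheel.ceilingEntry(key)
      if (e != null) e else wheel.firstEntry()
    }

With wheels keyed {0, 2, 4}, a computed id of 3 now resolves to the wheel for
partition 4 instead of a failed lookup.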
---
 ...dConsistentHashedLoadBalancerFactory.scala | 66 +++++++++++--------
 1 file changed, 40 insertions(+), 26 deletions(-)

diff --git a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedConsistentHashedLoadBalancerFactory.scala b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedConsistentHashedLoadBalancerFactory.scala
index ffd7be18..c41ef049 100644
--- a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedConsistentHashedLoadBalancerFactory.scala
+++ b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedConsistentHashedLoadBalancerFactory.scala
@@ -78,12 +78,15 @@
   }
 }

-class PartitionedConsistentHashedLoadBalancer[PartitionedId](numPartitions: Int, wheels: Map[Int, TreeMap[Int, Endpoint]], hashFn: PartitionedId => Int, serveRequestsIfPartitionMissing: Boolean = true)
+class PartitionedConsistentHashedLoadBalancer[PartitionedId](numPartitions: Int, wheels: Map[Int, util.TreeMap[Int, Endpoint]], hashFn: PartitionedId => Int, serveRequestsIfPartitionMissing: Boolean = true)
   extends PartitionedLoadBalancer[PartitionedId] with DefaultLoadBalancerHelper {
   import scala.collection.JavaConversions._
   val endpoints = wheels.values.flatMap(_.values).toSet
   val partitionToNodeMap = generatePartitionToNodeMap(endpoints, numPartitions, serveRequestsIfPartitionMissing)
   val partitionIds = wheels.keySet.toSet
+  val treeWheels = new util.TreeMap[Int, util.TreeMap[Int, Endpoint]]()
+  treeWheels.putAll(wheels)
+  val wheelSize = treeWheels.size()

   def nodesForOneReplica(id: PartitionedId, capability: Option[Long] = None, persistentCapability: Option[Long] = None) = {
     nodesForPartitions(id, wheels, capability, persistentCapability)
@@ -91,8 +94,13 @@

   def nodesForPartitionedId(id: PartitionedId, capability: Option[Long] = None, persistentCapability: Option[Long] = None) = {
     val hash = hashFn(id)
-    val partitionId = hash.abs % numPartitions
-    wheels.get(partitionId).flatMap { wheel => Option(wheel.foldLeft(Set.empty[Node]) { case (set, (p, e)) => if (e.node.isCapableOf(capability, persistentCapability)) set + e.node else set }) }.get
+    val partitionId = hash.abs % wheelSize
+    val entry = PartitionUtil.wheelEntry(treeWheels, partitionId)
+    if (entry == null) {
+      Set.empty[Node]
+    } else {
+      Option(entry.getValue).flatMap { wheel => Option(wheel.foldLeft(Set.empty[Node]) { case (set, (p, e)) => if (e.node.isCapableOf(capability, persistentCapability)) set + e.node else set }) }.get
+    }
   }

   def nodesForPartitions(id: PartitionedId, partitions: Set[Int], capability: Option[Long] = None, persistentCapability: Option[Long] = None) = {
@@ -143,37 +151,43 @@ class PartitionedConsistentHashedLoadBalancer[PartitionedId](numPartitions: Int,
   }

   override def nextNodes(id: PartitionedId, capability: Option[Long] = None, persistentCapability: Option[Long] = None): util.LinkedHashSet[Node] = {
-    val hash = hashFn(id)
-    val partitionId = hash.abs % numPartitions
-    // FIXME: wheels.get(partitionId) here may fail, we should use PartitionUtil.searchWheel to search the outer wheel as well
-    // this can happen if numPartitions is a lie, or if we have non-contiguous partitions (0, 2, 4), form
-    val innerMapOpt = wheels.get(partitionId)
     val result = new util.LinkedHashSet[Node]()
-    if (innerMapOpt.isDefined) {
-      val innerMap = innerMapOpt.get
-      val startEntry = PartitionUtil.wheelEntry(innerMap, hash)
-      if (startEntry != null) {
-        result.add(startEntry.getValue.node)
-        var nextEntry = PartitionUtil.rotateWheel(innerMap, startEntry.getKey)
-        while (nextEntry != startEntry) {
-          result.add(nextEntry.getValue.node)
-          nextEntry = PartitionUtil.rotateWheel(innerMap, nextEntry.getKey)
+    val hash = hashFn(id)
+    val partitionId = hash.abs % wheelSize
+    val innerMapEntry = PartitionUtil.wheelEntry(treeWheels, partitionId)
+    if (innerMapEntry == null) {
+      result
+    } else {
+      val innerMapOpt = Option(innerMapEntry.getValue)
+      if (innerMapOpt.isDefined) {
+        val innerMap = innerMapOpt.get
+        val startEntry = PartitionUtil.wheelEntry(innerMap, hash)
+        if (startEntry != null) {
+          result.add(startEntry.getValue.node)
+          var nextEntry = PartitionUtil.rotateWheel(innerMap, startEntry.getKey)
+          while (nextEntry != startEntry) {
+            result.add(nextEntry.getValue.node)
+            nextEntry = PartitionUtil.rotateWheel(innerMap, nextEntry.getKey)
+          }
+          return result
         }
-        return result
       }
+      log.warn("Failed to find mapping for %s, expect routing failures".format(id))
+      result
     }
-    log.warn("Failed to find mapping for %s, expect routing failures".format(id))
-    result
   }

   def nextNode(id: PartitionedId, capability: Option[Long] = None, persistentCapability: Option[Long] = None): Option[Node] = {
     val hash = hashFn(id)
-    val partitionId = hash.abs % numPartitions
-
-    // FIXME: wheels.get(partitionId) here may fail, we should use PartitionUtil.searchWheel to search the outer wheel as well
-    wheels.get(partitionId).flatMap { wheel =>
-      PartitionUtil.searchWheel(wheel, hash, (e: Endpoint) => e.canServeRequests && e.node.isCapableOf(capability, persistentCapability) )
-    }.map(_.node)
+    val partitionId = hash.abs % wheelSize
+    val innerMapEntry = PartitionUtil.wheelEntry(treeWheels, partitionId)
+    if (innerMapEntry == null) {
+      None
+    } else {
+      Option(innerMapEntry.getValue).flatMap { wheel =>
+        PartitionUtil.searchWheel(wheel, hash, (e: Endpoint) => e.canServeRequests && e.node.isCapableOf(capability, persistentCapability) )
+      }.map(_.node)
+    }
   }

   def partitionForId(id: PartitionedId): Int = {

From 239b78299a1d1d9acd388d3f13bd7e17876b94e4 Mon Sep 17 00:00:00 2001
From: Nick Hristov
Date: Tue, 28 Oct 2014 12:18:43 -0700
Subject: [PATCH 13/20] Fixing IntelliJ warnings.
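
The warnings were an unused java.util.TreeMap import (the util.TreeMap
spelling is used instead) and postfix selector notation, which Scala 2.10
flags unless scala.language.postfixOps is in scope. A tiny illustration of
the notation change, using a stand-in Option:

    val nodeOption: Option[Int] = Some(1)

    // `if (nodeOption isDefined)` trips the postfix-operator feature warning;
    // plain member selection compiles cleanly:
    if (nodeOption.isDefined) println(nodeOption.get)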
---
 .../PartitionedConsistentHashedLoadBalancerFactory.scala | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedConsistentHashedLoadBalancerFactory.scala b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedConsistentHashedLoadBalancerFactory.scala
index c41ef049..eaaedd30 100644
--- a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedConsistentHashedLoadBalancerFactory.scala
+++ b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedConsistentHashedLoadBalancerFactory.scala
@@ -1,7 +1,6 @@
 package com.linkedin.norbert.network.partitioned.loadbalancer

 import java.util
-import java.util.TreeMap
 import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger}

 import com.linkedin.norbert.cluster.{InvalidClusterException, Node}
@@ -59,7 +58,7 @@ class PartitionedConsistentHashedLoadBalancerFactory[PartitionedId](numPartition
     }

     val wheels = partitions.map { case (partition, endpointsForPartition) =>
-      val wheel = new TreeMap[Int, Endpoint]
+      val wheel = new util.TreeMap[Int, Endpoint]
       endpointsForPartition.foreach { endpoint =>
         var r = 0
         while (r < numReplicas) {
@@ -107,7 +106,7 @@ class PartitionedConsistentHashedLoadBalancer[PartitionedId](numPartitions: Int,
     nodesForPartitions(id, wheels.filterKeys(partitions contains _), capability, persistentCapability)
   }

-  private def nodesForPartitions(id: PartitionedId, wheels: Map[Int, TreeMap[Int, Endpoint]], capability: Option[Long], persistentCapability: Option[Long]) = {
+  private def nodesForPartitions(id: PartitionedId, wheels: Map[Int, util.TreeMap[Int, Endpoint]], capability: Option[Long], persistentCapability: Option[Long]) = {
     if (id == null) {
       nodesForPartitions0(partitionToNodeMap filterKeys wheels.containsKey, capability, persistentCapability)
     } else {
@@ -139,7 +138,7 @@ class PartitionedConsistentHashedLoadBalancer[PartitionedId](numPartitions: Int,
   private def nodesForPartitions0(partitionToNodeMap: Map[Int, (IndexedSeq[Endpoint], AtomicInteger, Array[AtomicBoolean])], capability: Option[Long], persistentCapability: Option[Long] = None) = {
     partitionToNodeMap.keys.foldLeft(Map.empty[Node, Set[Int]]) { (map, partition) =>
       val nodeOption = nodeForPartition(partition, capability, persistentCapability)
-      if(nodeOption isDefined) {
+      if(nodeOption.isDefined) {
         val n = nodeOption.get
         map + (n -> (map.getOrElse(n, Set.empty[Int]) + partition))
       } else if(serveRequestsIfPartitionMissing) {

From 2d201bc2daae58dca756115934872fdda1c1ea5d Mon Sep 17 00:00:00 2001
From: Nick Hristov
Date: Tue, 28 Oct 2014 12:24:34 -0700
Subject: [PATCH 14/20] Removing Tidemark copyright

---
 .../network/ConsistentHashPartitionedLoadBalancerTest.java | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/java-network/src/test/java/com/linkedin/norbert/javacompat/network/ConsistentHashPartitionedLoadBalancerTest.java b/java-network/src/test/java/com/linkedin/norbert/javacompat/network/ConsistentHashPartitionedLoadBalancerTest.java
index 8cc2d995..6f8882eb 100644
--- a/java-network/src/test/java/com/linkedin/norbert/javacompat/network/ConsistentHashPartitionedLoadBalancerTest.java
+++ b/java-network/src/test/java/com/linkedin/norbert/javacompat/network/ConsistentHashPartitionedLoadBalancerTest.java
@@ -1,8 +1,3 @@
-/*
- * Copyright Tidemark Systems, Inc. All rights reserved.
- * Tidemark Systems Confidential and Proprietary Information. Not for external distribution, use or sale.
- * Tidemark Systems software is exclusively licensed according to the terms of our Software License and Services Agreement.
- */
 package com.linkedin.norbert.javacompat.network;

 import java.util.HashSet;

From a9788498416c3028e561cfaeabf5a8ffba9526da Mon Sep 17 00:00:00 2001
From: Nick Hristov
Date: Wed, 12 Nov 2014 13:37:30 -0800
Subject: [PATCH 15/20] Added the direct partitioned load balancer to the
 project. The direct partitioned load balancer allows clients to manually
 specify to which partition a request should be routed.

---
 .../DirectPartitionedLoadBalancer.scala      | 110 ++++++++++++++++++
 .../HashPartitionedLoadBalancerFactory.scala |  30 +++++
 2 files changed, 140 insertions(+)
 create mode 100644 network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DirectPartitionedLoadBalancer.scala
 create mode 100644 network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/HashPartitionedLoadBalancerFactory.scala

diff --git a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DirectPartitionedLoadBalancer.scala b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DirectPartitionedLoadBalancer.scala
new file mode 100644
index 00000000..5483c358
--- /dev/null
+++ b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DirectPartitionedLoadBalancer.scala
@@ -0,0 +1,110 @@
+package com.linkedin.norbert.network.partitioned.loadbalancer
+
+import java.util
+
+import com.linkedin.norbert.cluster.{InvalidClusterException, Node}
+import com.linkedin.norbert.network.common.Endpoint
+
+import scala.collection.immutable.HashSet
+import scala.collection.mutable
+
+/**
+ * A partition key scheme which allows the client to specify which partition a request should go to.
+ *
+ * @param partitionId specifies the target partition
+ * @param requestKey used for further routing within the partition (e.g. consistent hashing within the partition)
+ * @tparam KeyType the type of the secondary key
+ */
+case class PartitionKey[KeyType](partitionId: Int, requestKey: KeyType) {
+
+}
+
+trait PartitionFallBackStrategy {
+  def nextPartition(currentPartition: Int): Int
+}
+
+class DefaultPartitionFallbackStrategy(defaultPartition: Int) extends PartitionFallBackStrategy {
+  override def nextPartition(currentPartition: Int): Int = defaultPartition
+}
+
+
+/**
+ * A load balancer which allows the client to specify which partition a request should go to.
+ *
+ * The request is then routed to the load balancers of the appropriate partition.
+ *
+ * @param delegateLoadBalanders a map of partitionId -> load balancer delegate
+ *
+ * @tparam KeyType the type of the secondary key part
+ */
+class DirectPartitionedLoadBalancer[KeyType](delegateLoadBalanders: Map[Int, PartitionedLoadBalancer[KeyType]],
+                                             fallbackStrategy: PartitionFallBackStrategy)
+  extends PartitionedLoadBalancer[PartitionKey[KeyType]] {
+
+  /**
+   * Retrieve the load balancer, or fallback to a different partition, if no load balancer exists for the
+   * current partition.
+   *
+   * If no load balancer can be found, throws an IllegalStateException
+   */
+  def getLoadBalancer(partitionId: Int): PartitionedLoadBalancer[KeyType] = {
+    var lb = delegateLoadBalanders.get(partitionId)
+    if (lb == None) {
+      val nextPartitionId = fallbackStrategy.nextPartition(partitionId)
+      if (nextPartitionId != partitionId) {
+        // fallback once
+        lb = delegateLoadBalanders.get(nextPartitionId)
+      }
+    }
+    if (lb == None) {
+      throw new IllegalStateException("No load-balancer for partition " + partitionId)
+    }
+    lb.get
+  }
+
+  override def nextNode(id: PartitionKey[KeyType], capability: Option[Long], persistentCapability: Option[Long]): Option[Node] =
+    getLoadBalancer(id.partitionId).nextNode(id.requestKey, capability, persistentCapability)
+
+  override def nodesForOneReplica(id: PartitionKey[KeyType], capability: Option[Long], persistentCapability: Option[Long]): Map[Node, Set[Int]] =
+    getLoadBalancer(id.partitionId).nodesForOneReplica(id.requestKey, capability, persistentCapability)
+
+  override def nodesForPartitions(id: PartitionKey[KeyType], partitions: Set[Int], capability: Option[Long], persistentCapability: Option[Long]): Map[Node, Set[Int]] =
+    throw new UnsupportedOperationException("Not implemented yet")
+
+  override def nodesForPartitionedId(id: PartitionKey[KeyType], capability: Option[Long], persistentCapability: Option[Long]): Set[Node] =
+    getLoadBalancer(id.partitionId).nodesForPartitionedId(id.requestKey, capability, persistentCapability)
+
+  override def nextNodes(id: PartitionKey[KeyType], capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[Node] =
+    getLoadBalancer(id.partitionId).nextNodes(id.requestKey, capability, persistentCapability)
+}
+
+class DirectPartitionedLoadBalancerFactory[KeyType](delegateFactory: PartitionedLoadBalancerFactory[KeyType],
+                                                    fallbackStrategy: PartitionFallBackStrategy) extends PartitionedLoadBalancerFactory[PartitionKey[KeyType]] {
+
+  @throws(classOf[InvalidClusterException])
+  override def newLoadBalancer(nodes: Set[Endpoint]): PartitionedLoadBalancer[PartitionKey[KeyType]] = {
+    val map = mutable.HashMap[Int, Set[Endpoint]]()
+    nodes.foreach((e: Endpoint) => {
+      e.node.partitionIds.foreach((i: Int) => {
+        val setOpt = map.get(i)
+        if (setOpt == None) {
+          val set: Set[Endpoint] = new HashSet[Endpoint]() + e
+          map += (i -> set)
+        } else {
+          val set: Set[Endpoint] = setOpt.get + e
+          map += (i -> set)
+        }
+      })
+    })
+
+    // pardon my crappy scala
+    val delegates : Map[Int,PartitionedLoadBalancer[KeyType]] = Map() ++ map.mapValues(v => delegateFactory.newLoadBalancer( v ))
+    new DirectPartitionedLoadBalancer[KeyType](delegates, fallbackStrategy)
+  }
+
+  override def getNumPartitions(endpoints: Set[Endpoint]): Int = {
+    val partitionIds = endpoints.map((x: Endpoint) => x.node.partitionIds)
+    val flat = new HashSet[Int] ++ partitionIds
+    flat.size
+  }
+}
\ No newline at end of file

diff --git a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/HashPartitionedLoadBalancerFactory.scala b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/HashPartitionedLoadBalancerFactory.scala
new file mode 100644
index 00000000..c114936f
--- /dev/null
+++ b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/HashPartitionedLoadBalancerFactory.scala
@@ -0,0 +1,30 @@
+package com.linkedin.norbert.network.partitioned.loadbalancer
+
+
+/**
+ * A very simple adapter of a load balancer factory that provides a default hashCode based implementation
+ * for hash calculations.
+ *
+ * Suitable for use within Java classes.
+ *
+ * @see com.linkedin.norbert.javacompat.network.ScalaLbfToJavaLbf
+ */
+class HashPartitionedLoadBalancerFactory[PartitionedId](numPartitions: Int,
+                                                        numReplicas: Int,
+                                                        hashFn: PartitionedId => Int,
+                                                        endpointHashFn: String => Int,
+                                                        serveRequestsIfPartitionMissing: Boolean)
+  extends PartitionedConsistentHashedLoadBalancerFactory[PartitionedId](
+    numPartitions: Int,
+    numReplicas: Int,
+    hashFn: PartitionedId => Int,
+    endpointHashFn: String => Int,
+    serveRequestsIfPartitionMissing: Boolean) {
+
+  def this( numReplicas: Int) = {
+    this(-1, numReplicas, (p: PartitionedId) => p.hashCode, (p: String) => p.hashCode, true)
+  }
+
+  def this( numPartitions: Int, numReplicas: Int) = {
+    this(numPartitions, numReplicas, (p: PartitionedId) => p.hashCode, (p: String) => p.hashCode, true)
+  }
+}

From caa21fb35edf1ddc5f0739a1390660444cee75a6 Mon Sep 17 00:00:00 2001
From: Nick Hristov
Date: Mon, 1 Dec 2014 13:55:55 -0800
Subject: [PATCH 16/20] Bugfixes. Got the Norbert services to work with
 partitioning. Changed the Norbert version to have a suffix of -pb25
 (protobuf 2.5). Altered the manner in which the session id is generated (it
 is now generated in the filter and passed onward into all services).

---
 gradle.properties | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/gradle.properties b/gradle.properties
index 0d2fc7dd..a95e474b 100644
--- a/gradle.properties
+++ b/gradle.properties
@@ -1,4 +1,4 @@
-version=0.6.85
+version=0.6.85-pb25
 defaultScalaVersion=2.10.3
 targetScalaVersions=2.10.3
 crossBuild=false

From b8197dcfbf84e53d85627184ad0e8c69256c5365 Mon Sep 17 00:00:00 2001
From: Nick Hristov
Date: Thu, 11 Dec 2014 16:25:01 -0800
Subject: [PATCH 17/20] Fixed a couple of warnings I was getting in the build.
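
The spec forced the Option with .get and then compared the resulting Node to
None by reference, a check that can never succeed; keeping the Option[Node]
makes the guard meaningful. A self-contained sketch of the shape of the fix,
using an even more idiomatic foreach (nextNode here is a stand-in for the
load balancer call, not the real API):

    val accessVector = Array.fill(6)(0)
    def nextNode(id: Int): Option[Int] = if (id % 2 == 0) Some(id % 6) else None

    // foreach runs only when the Option is defined, so there is no .get
    // and no Node-vs-None comparison left to warn about:
    nextNode(1210) foreach { nodeId => accessVector(nodeId) += 1 }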
---
 ...ConsistentHashPartitionedLoadBalancerFactorySpec.scala | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/network/src/test/scala/com/linkedin/norbert/network/partitioned/loadbalancer/ConsistentHashPartitionedLoadBalancerFactorySpec.scala b/network/src/test/scala/com/linkedin/norbert/network/partitioned/loadbalancer/ConsistentHashPartitionedLoadBalancerFactorySpec.scala
index c084e65d..27ecb63c 100644
--- a/network/src/test/scala/com/linkedin/norbert/network/partitioned/loadbalancer/ConsistentHashPartitionedLoadBalancerFactorySpec.scala
+++ b/network/src/test/scala/com/linkedin/norbert/network/partitioned/loadbalancer/ConsistentHashPartitionedLoadBalancerFactorySpec.scala
@@ -185,15 +185,15 @@ class ConsistentHashPartitionedLoadBalancerFactorySpec extends SpecificationWith
     val lb = loadBalancerFactory.newLoadBalancer(toEndpoints(nodes))
     val accessVector = Array(0,0,0,0,0,0)
     (0 to 11).foreach { (i) =>
-      val node1 : Node = lb.nextNode(EId(1210), Some(0x1L), Some(2L)).get
+      val node1 : Option[Node] = lb.nextNode(EId(1210), Some(0x1L), Some(2L))
       if (!node1.eq(None))
-        accessVector(node1.id) = accessVector(node1.id) + 1
+        accessVector(node1.get.id) = accessVector(node1.get.id) + 1
     }

     (0 to 11).foreach { (i) =>
-      val node2 : Node = lb.nextNode(EId(1210), Some(0x2L), Some(2L)).get
+      val node2 : Option[Node] = lb.nextNode(EId(1210), Some(0x2L), Some(2L))
       if (!node2.eq(None))
-        accessVector(node2.id) = accessVector(node2.id) + 1
+        accessVector(node2.get.id) = accessVector(node2.get.id) + 1
     }

     accessVector(0) must be_==(accessVector(3))

From 4001387220e25012a0d1f100a41e1cb713d1e78c Mon Sep 17 00:00:00 2001
From: Nick Hristov
Date: Thu, 11 Dec 2014 16:27:14 -0800
Subject: [PATCH 18/20] Modified the fail over feature of the partitioned
 norbert client to be able to fail over to multiple nodes.
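
The fail-over callback now recurses over the ordered candidate list returned
by nextNodes: the handler for node i resends to node i+1 on a ConnectException,
bounded by both the list length and failOverAttempts. A compact sketch of that
control flow, with String standing in for Node and the transport call
abstracted as attempt (both illustrative):

    import java.net.ConnectException

    def send[Resp](nodes: List[String], i: Int, maxFailOvers: Int)
                  (attempt: (String, Either[Throwable, Resp] => Unit) => Unit)
                  (callback: Either[Throwable, Resp] => Unit): Unit =
      attempt(nodes(i), {
        case Left(_: ConnectException) if i + 1 < nodes.length && i < maxFailOvers =>
          send(nodes, i + 1, maxFailOvers)(attempt)(callback) // try the next replica
        case other => callback(other) // success, or a non-connection failure
      })

With the default failOverAttempts of 1 this reproduces the old single
fail-over; the new test overrides it to exercise longer chains.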
---
 .../PartitionedNetworkClient.scala     |  46 ++---
 .../PartitionedNetworkClientSpec.scala | 174 +++++++++++++++++-
 2 files changed, 195 insertions(+), 25 deletions(-)

diff --git a/network/src/main/scala/com/linkedin/norbert/network/partitioned/PartitionedNetworkClient.scala b/network/src/main/scala/com/linkedin/norbert/network/partitioned/PartitionedNetworkClient.scala
index e9ca097f..f3184eaf 100644
--- a/network/src/main/scala/com/linkedin/norbert/network/partitioned/PartitionedNetworkClient.scala
+++ b/network/src/main/scala/com/linkedin/norbert/network/partitioned/PartitionedNetworkClient.scala
@@ -29,6 +29,7 @@ import com.linkedin.norbert.network.partitioned.loadbalancer.{PartitionedLoadBal
 import com.linkedin.norbert.network.server.{MessageExecutorComponent, NetworkServer}

 import scala.beans.BeanProperty
+import scala.collection.JavaConversions
 import scala.util.Random

 object RoutingConfigs {
@@ -777,6 +778,8 @@ trait PartitionedNetworkClientFailOver[PartitionedId] extends PartitionedNetwork
   this: ClusterClientComponent with ClusterIoClientComponent with PartitionedLoadBalancerFactoryComponent[PartitionedId] =>

+  val failOverAttempts:Int = 1
+
   override def sendRequest[RequestMsg, ResponseMsg](id: PartitionedId, request: RequestMsg, callback: Either[Throwable, ResponseMsg] => Unit, capability: Option[Long], persistentCapability: Option[Long])
   (implicit is: InputSerializer[RequestMsg, ResponseMsg], os: OutputSerializer[RequestMsg, ResponseMsg]): Unit = doIfConnected {
     if (id == null || request == null) throw new NullPointerException
@@ -787,32 +790,29 @@ trait PartitionedNetworkClientFailOver[PartitionedId] extends PartitionedNetwork
     if (nodes.isEmpty) {
       throw new NoNodesAvailableException("Unable to satisfy request, no node available for id %s".format(id))
     } else {
-      val nodeIterator = nodes.iterator()
-      val firstNode = nodeIterator.next()
-      var failOverNode :Option[Node] = None
-      if (nodeIterator.hasNext) {
-        failOverNode = Option[Node](nodeIterator.next())
-      }
-      val failOverCallback = (e:Either[Throwable, ResponseMsg]) => {
-        if (failOverNode.isDefined) {
-          e match {
-            case Left(ex:ConnectException) => failOverRequestToNextNode(firstNode, failOverNode.get, id, request, callback, capability, persistentCapability);
-            case Left(ex:Throwable) => callback.apply(e);
-            case Right(r:ResponseMsg) => callback.apply(e);
-          }
-        } else {
-          callback.apply(e)
-        }
-        () // force unit return type
-      }
-      doSendRequest(PartitionedRequest(request, firstNode, Set(id), (node: Node, ids: Set[PartitionedId]) => request, is, os, Option(failOverCallback)))
+      val nodeList:List[Node] = List() ++ JavaConversions.asScalaIterator(nodes.iterator())
+      doSendRequest(nodeList, 0, id, request, callback, capability, persistentCapability)
     }
   }

-  def failOverRequestToNextNode[RequestMsg, ResponseMsg]( failureNode:Node, node:Node, id: PartitionedId, request: RequestMsg, callback: Either[Throwable, ResponseMsg] => Unit, capability: Option[Long], persistentCapability: Option[Long])
-  (implicit is: InputSerializer[RequestMsg, ResponseMsg], os: OutputSerializer[RequestMsg, ResponseMsg]): Unit = doIfConnected {
-    log.warn("request to node %d failed, re-routing to node %d".format(failureNode.id, node.id))
-    doSendRequest(PartitionedRequest(request, node, Set(id), (node: Node, ids: Set[PartitionedId]) => request, is, os, Option(callback)))
+  def doSendRequest[RequestMsg, ResponseMsg](nodes:List[Node], nodeIndex:Int, id: PartitionedId, request: RequestMsg, callback: Either[Throwable, ResponseMsg] => Unit, capability: Option[Long], persistentCapability: Option[Long])
+  (implicit is: InputSerializer[RequestMsg, ResponseMsg], os: OutputSerializer[RequestMsg, ResponseMsg]): Unit = {
+
+    val node:Node = nodes(nodeIndex)
+
+    val failOverCallback = (e:Either[Throwable, ResponseMsg]) => {
+      if ((nodeIndex+1 < nodes.length) && nodeIndex < failOverAttempts) {
+        e match {
+          case Left(ex:ConnectException) => doSendRequest(nodes, nodeIndex + 1, id, request, callback, capability, persistentCapability);
+          case Left(ex:Throwable) => callback.apply(e);
+          case Right(r:ResponseMsg) => callback.apply(e);
+        }
+      } else {
+        callback.apply(e)
+      }
+      () // force unit return type
+    }
+    doSendRequest(PartitionedRequest(request, node, Set(id), (node: Node, ids: Set[PartitionedId]) => request, is, os, Option(failOverCallback)))
   }
 }
\ No newline at end of file

diff --git a/network/src/test/scala/com/linkedin/norbert/network/partitioned/PartitionedNetworkClientSpec.scala b/network/src/test/scala/com/linkedin/norbert/network/partitioned/PartitionedNetworkClientSpec.scala
index 3fec5633..0499d016 100644
--- a/network/src/test/scala/com/linkedin/norbert/network/partitioned/PartitionedNetworkClientSpec.scala
+++ b/network/src/test/scala/com/linkedin/norbert/network/partitioned/PartitionedNetworkClientSpec.scala
@@ -17,13 +17,16 @@ package com.linkedin.norbert
 package network
 package partitioned

+import java.net.ConnectException
 import java.util
 import java.util.concurrent.ExecutionException

-import com.linkedin.norbert.cluster.{ClusterClientComponent, ClusterDisconnectedException, InvalidClusterException, Node}
-import com.linkedin.norbert.network.common.{BaseNetworkClientSpecification, ClusterIoClientComponent}
+import com.linkedin.norbert.cluster._
+import com.linkedin.norbert.network.common.{ClusterIoClientComponent, BaseNetworkClientSpecification}
 import com.linkedin.norbert.network.partitioned.loadbalancer._

+import scala.collection.JavaConversions
+
 class PartitionedNetworkClientSpec extends BaseNetworkClientSpecification {
   val networkClient = new PartitionedNetworkClient[Int] with ClusterClientComponent with ClusterIoClientComponent with PartitionedLoadBalancerFactoryComponent[Int] {
@@ -1014,5 +1017,172 @@ class PartitionedNetworkClientSpec extends BaseNetworkClientSpecification {
     }
   }

+  "PartitionedNetworkClientFailOver" should {
+    "fail-over to next node" in {
+      val nc2 = new PartitionedNetworkClientFailOver[Int] with ClusterClientComponent with ClusterIoClientComponent with PartitionedLoadBalancerFactoryComponent[Int] {
+        val lb = new PartitionedLoadBalancer[Int] {
+          var iter = PartitionedNetworkClientSpec.this.nodes.iterator
+          def nextNode(id: Int, c: Option[Long] = None, pc : Option[Long] = None) = {
+            if (!iter.hasNext ) iter = PartitionedNetworkClientSpec.this.nodes.iterator
+            Some(iter.next)
+          }
+          def nodesForOneReplica(id: Int, c: Option[Long] = None, pc: Option[Long] = None) = null
+          def nodesForPartitionedId(id:Int, c: Option[Long] = None, pc: Option[Long] = None) = null
+          def nodesForPartitions(id: Int, partitions: Set[Int], c: Option[Long] = None, pc: Option[Long] = None) = null
+          def nextNodes(id: Int, capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[Node] = {
+            val result : util.LinkedHashSet[Node] = new util.LinkedHashSet()
+            result.addAll(JavaConversions.asJavaCollection( PartitionedNetworkClientSpec.this.nodes))
+            result
+          }
+        }
+        val loadBalancerFactory = mock[PartitionedLoadBalancerFactory[Int]]
+        val clusterIoClient = new ClusterIoClient {
+          var invocationMap = Map(1 -> 0, 2 -> 0, 3 -> 0)
+          def sendMessage[RequestMsg, ResponseMsg](node: Node, requestCtx: Request[RequestMsg, ResponseMsg]) {
+            val oldVal = invocationMap(node.id)
+            invocationMap = invocationMap + (node.id -> (oldVal+1))
+            if (node.id == 1) {
+              requestCtx.onFailure(new ConnectException with RequestAccess[Request[RequestMsg, ResponseMsg]] {
+                def request = requestCtx
+              })
+            } else {
+              requestCtx.onSuccess(requestCtx.outputSerializer.requestToBytes(requestCtx.message))
+            }
+          }
+          def nodesChanged(nodes: Set[Node]) = {PartitionedNetworkClientSpec.this.endpoints}
+          def shutdown {}
+        }
+        val clusterClient = PartitionedNetworkClientSpec.this.clusterClient
+      }
+      nc2.clusterClient.nodes returns nodeSet
+      nc2.clusterClient.isConnected returns true
+      nc2.loadBalancerFactory.newLoadBalancer(endpoints) returns nc2.lb
+      nc2.start
+
+      // check pre-test assumptions
+      nc2.clusterIoClient.invocationMap(1) must be_==(0)
+      nc2.clusterIoClient.invocationMap(2) must be_==(0)
+      nc2.clusterIoClient.invocationMap(3) must be_==(0)
+
+      nc2.sendRequest[Ping, Ping](0, request)
+
+      // check post-test values
+      nc2.clusterIoClient.invocationMap(1) must be_==(1)
+      nc2.clusterIoClient.invocationMap(2) must be_==(1)
+      nc2.clusterIoClient.invocationMap(3) must be_==(0)
+    }
+
+    "fail when the fail over node fails" in {
+      val nc2 = new PartitionedNetworkClientFailOver[Int] with ClusterClientComponent with ClusterIoClientComponent with PartitionedLoadBalancerFactoryComponent[Int] {
+        val lb = new PartitionedLoadBalancer[Int] {
+          var iter = PartitionedNetworkClientSpec.this.nodes.iterator
+          def nextNode(id: Int, c: Option[Long] = None, pc : Option[Long] = None) = {
+            if (!iter.hasNext ) iter = PartitionedNetworkClientSpec.this.nodes.iterator
+            Some(iter.next)
+          }
+          def nodesForOneReplica(id: Int, c: Option[Long] = None, pc: Option[Long] = None) = null
+          def nodesForPartitionedId(id:Int, c: Option[Long] = None, pc: Option[Long] = None) = null
+          def nodesForPartitions(id: Int, partitions: Set[Int], c: Option[Long] = None, pc: Option[Long] = None) = null
+          def nextNodes(id: Int, capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[Node] = {
+            val result : util.LinkedHashSet[Node] = new util.LinkedHashSet()
+            result.addAll(JavaConversions.asJavaCollection( PartitionedNetworkClientSpec.this.nodes))
+            result
+          }
+        }
+        val loadBalancerFactory = mock[PartitionedLoadBalancerFactory[Int]]
+        val clusterIoClient = new ClusterIoClient {
+          var invocationMap = Map(1 -> 0, 2 -> 0, 3 -> 0)
+          def sendMessage[RequestMsg, ResponseMsg](node: Node, requestCtx: Request[RequestMsg, ResponseMsg]) {
+            val oldVal = invocationMap(node.id)
+            invocationMap = invocationMap + (node.id -> (oldVal+1))
+            if (node.id == 1 || node.id == 2) {
+              requestCtx.onFailure(new ConnectException with RequestAccess[Request[RequestMsg, ResponseMsg]] {
+                def request = requestCtx
+              })
+            } else {
+              requestCtx.onSuccess(requestCtx.outputSerializer.requestToBytes(requestCtx.message))
+            }
+          }
+          def nodesChanged(nodes: Set[Node]) = {PartitionedNetworkClientSpec.this.endpoints}
+          def shutdown {}
+        }
+        val clusterClient = PartitionedNetworkClientSpec.this.clusterClient
+      }
+      nc2.clusterClient.nodes returns nodeSet
+      nc2.clusterClient.isConnected returns true
+      nc2.loadBalancerFactory.newLoadBalancer(endpoints) returns nc2.lb
+      nc2.start
+
+      // check pre-test assumptions
+      nc2.clusterIoClient.invocationMap(1) must be_==(0)
+      nc2.clusterIoClient.invocationMap(2) must be_==(0)
+      nc2.clusterIoClient.invocationMap(3) must be_==(0)
+
+      val future = nc2.sendRequest[Ping, Ping](0, request)
+      future.get must throwA[Exception]
+
+      // check post-test values
+      nc2.clusterIoClient.invocationMap(1) must be_==(1)
+      nc2.clusterIoClient.invocationMap(2) must be_==(1)
+      nc2.clusterIoClient.invocationMap(3) must be_==(0)
+    }
+
+    "fail-over must propagate to multiple nodes, if fail over nodes fail as well" in {
+      val nc2 = new PartitionedNetworkClientFailOver[Int] with ClusterClientComponent with ClusterIoClientComponent with PartitionedLoadBalancerFactoryComponent[Int] {
+        override val failOverAttempts = 1000;
+        val lb = new PartitionedLoadBalancer[Int] {
+          var iter = PartitionedNetworkClientSpec.this.nodes.iterator
+          def nextNode(id: Int, c: Option[Long] = None, pc : Option[Long] = None) = {
+            if (!iter.hasNext ) iter = PartitionedNetworkClientSpec.this.nodes.iterator
+            Some(iter.next)
+          }
+          def nodesForOneReplica(id: Int, c: Option[Long] = None, pc: Option[Long] = None) = null
+          def nodesForPartitionedId(id:Int, c: Option[Long] = None, pc: Option[Long] = None) = null
+          def nodesForPartitions(id: Int, partitions: Set[Int], c: Option[Long] = None, pc: Option[Long] = None) = null
+          def nextNodes(id: Int, capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[Node] = {
+            val result : util.LinkedHashSet[Node] = new util.LinkedHashSet()
+            result.addAll(JavaConversions.asJavaCollection( PartitionedNetworkClientSpec.this.nodes))
+            result
+          }
+        }
+        val loadBalancerFactory = mock[PartitionedLoadBalancerFactory[Int]]
+        val clusterIoClient = new ClusterIoClient {
+          var invocationMap = Map(1 -> 0, 2 -> 0, 3 -> 0)
+          def sendMessage[RequestMsg, ResponseMsg](node: Node, requestCtx: Request[RequestMsg, ResponseMsg]) {
+            val oldVal = invocationMap(node.id)
+            invocationMap = invocationMap + (node.id -> (oldVal+1))
+            if (node.id == 1 || node.id == 2) {
+              requestCtx.onFailure(new ConnectException with RequestAccess[Request[RequestMsg, ResponseMsg]] {
+                def request = requestCtx
+              })
+            } else {
+              requestCtx.onSuccess(requestCtx.outputSerializer.requestToBytes(requestCtx.message))
+            }
+          }
+          def nodesChanged(nodes: Set[Node]) = {PartitionedNetworkClientSpec.this.endpoints}
+          def shutdown {}
+        }
+        val clusterClient = PartitionedNetworkClientSpec.this.clusterClient
+      }
+      nc2.clusterClient.nodes returns nodeSet
+      nc2.clusterClient.isConnected returns true
+      nc2.loadBalancerFactory.newLoadBalancer(endpoints) returns nc2.lb
+      nc2.start
+
+      // check pre-test assumptions
+      nc2.clusterIoClient.invocationMap(1) must be_==(0)
+      nc2.clusterIoClient.invocationMap(2) must be_==(0)
+      nc2.clusterIoClient.invocationMap(3) must be_==(0)
+
+      nc2.sendRequest[Ping, Ping](0, request)
+
+      // check post-test values
+      nc2.clusterIoClient.invocationMap(1) must be_==(1)
+      nc2.clusterIoClient.invocationMap(2) must be_==(1)
+      nc2.clusterIoClient.invocationMap(3) must be_==(1)
+    }
+  }

   def messageCustomizer(node: Node, ids: Set[Int]): Ping = new Ping
 }
\ No newline at end of file

From f7b5b98f20f66450371f043db23ba6f6b4d7d8f9 Mon Sep 17 00:00:00 2001
From: Nick Hristov
Date: Thu, 18 Jun 2015 17:47:56 -0700
Subject: [PATCH 19/20] Pushed Scala to 2.10.4 and protoc to 2.6

---
 build.gradle                                       |  2 +-
 gradle.properties                                  |  6 +++---
 java-cluster/build.gradle                          |  1 +
 java-network/build.gradle                          |  1 +
 network/build.gradle                               |  1 +
 .../linkedin/norbert/network/util/ProtoUtils.scala |  2 +-
 project/Build.scala                                | 12 +++++++-----
 7 files changed, 15 insertions(+), 10 deletions(-)

diff --git a/build.gradle b/build.gradle
index f8ad6284..99b6af86 100644
--- a/build.gradle
+++ b/build.gradle
@@ -33,7 +33,7 @@ subprojects {
   ext.externalDependency = [
     'zookeeper':'org.apache.zookeeper:zookeeper:3.3.4',
-    'protobuf':'com.google.protobuf:protobuf-java:2.5.0',
+    'protobuf':'com.google.protobuf:protobuf-java:2.6.0',
     'log4j':'log4j:log4j:1.2.17',
     'netty':'io.netty:netty:3.7.0.Final',
     'slf4jApi':'org.slf4j:slf4j-api:1.7.5',

diff --git a/gradle.properties b/gradle.properties
index a95e474b..cee8190f 100644
--- a/gradle.properties
+++ b/gradle.properties
@@ -1,4 +1,4 @@
-version=0.6.85-pb25
-defaultScalaVersion=2.10.3
-targetScalaVersions=2.10.3
+version=0.6.85-tm-pb26
+defaultScalaVersion=2.10.4
+targetScalaVersions=2.10.4
 crossBuild=false

diff --git a/java-cluster/build.gradle b/java-cluster/build.gradle
index dd59778b..28c4c1cf 100644
--- a/java-cluster/build.gradle
+++ b/java-cluster/build.gradle
@@ -4,5 +4,6 @@ apply plugin: 'scala'
 dependencies {
   compile project(":cluster$scalaSuffix")
   compile externalDependency.scalaLibrary
+  compile externalDependency.scalaActors
 }

diff --git a/java-network/build.gradle b/java-network/build.gradle
index 8d17490f..0946aa32 100644
--- a/java-network/build.gradle
+++ b/java-network/build.gradle
@@ -6,5 +6,6 @@ dependencies {
   compile project(":java-cluster$scalaSuffix")
   compile externalDependency.scalaLibrary
+  compile externalDependency.scalaActors
 }

diff --git a/network/build.gradle b/network/build.gradle
index 709fa614..80fce452 100644
--- a/network/build.gradle
+++ b/network/build.gradle
@@ -4,6 +4,7 @@ apply plugin: 'scala'
 dependencies {
   compile project(":cluster$scalaSuffix")
   compile externalDependency.scalaLibrary
+  compile externalDependency.scalaActors
   compile externalDependency.netty
   compile externalDependency.slf4jApi
   compile externalDependency.slf4jLog4j

diff --git a/network/src/main/scala/com/linkedin/norbert/network/util/ProtoUtils.scala b/network/src/main/scala/com/linkedin/norbert/network/util/ProtoUtils.scala
index ce595bab..8ebdc069 100644
--- a/network/src/main/scala/com/linkedin/norbert/network/util/ProtoUtils.scala
+++ b/network/src/main/scala/com/linkedin/norbert/network/util/ProtoUtils.scala
@@ -18,7 +18,7 @@ object ProtoUtils extends Logging {
       f
     } catch {
       case ex: Exception =>
-        log.warn(ex, "Cannot eliminate a copy when converting a ByteString to a byte[]")
+        log.info(ex, "Cannot eliminate a copy when converting a ByteString to a byte[]")
         null
     }

diff --git a/project/Build.scala b/project/Build.scala
index 453db593..c01fa7d9 100644
--- a/project/Build.scala
+++ b/project/Build.scala
@@ -16,7 +16,7 @@ object BuildSettings {
   val buildSettings = Defaults.defaultSettings ++ Seq (
     organization := "com.linkedin",
     version      := "0.6.65",
-    scalaVersion := "2.8.1",
+    scalaVersion := "2.10.4",
     credentialsSetting,
     publishArtifact in (Compile, packageDoc) := false,
     publishTo <<= (version) { version: String =>
@@ -35,19 +35,21 @@ object Resolvers {

 object ClusterDependencies {
   val ZOOKEEPER_VER = "3.3.0"
-  val PROTOBUF_VER = "2.4.0a"
+  val PROTOBUF_VER = "2.6.0"
   val LOG4J_VER = "1.2.16"
-  val SPECS_VER = "1.6.7"
+  val SPECS_VER = "1.6.9"
   val MOCKITO_VER = "1.8.4"
   val CGLIB_VER = "2.1_3"
   val OBJENESIS = "1.0"
   val JUNIT_VER = "4.8.1"
-
+  val AKKA_ACTOR_VER = "2.4"

   val zookeeper = "org.apache.zookeeper" % "zookeeper" % ZOOKEEPER_VER
   val protobuf = "com.google.protobuf" % "protobuf-java" % PROTOBUF_VER

+  val akkaActor = "com.typesafe.akka" % "akka-actor" % AKKA_ACTOR_VER
+
   val log4j = "log4j" % "log4j" % LOG4J_VER

   val specs = "org.scala-tools.testing" %% "specs" % SPECS_VER % "test"
@@ -60,7 +62,7 @@ object ClusterDependencies {

   val junit = "junit" % "junit" % JUNIT_VER % "test"

-  val deps = Seq(zookeeper, protobuf, log4j, specs, mockito, cglib, objenesis, junit)
+  val deps = Seq(zookeeper, protobuf, log4j, specs, mockito, cglib, objenesis, junit, akkaActor)
 }

 object NetworkDependencies {

From 198624e36c57daadb8695e4ad30f058242a1f4db Mon Sep 17 00:00:00 2001
From: Nick Hristov
Date: Sun, 9 Aug 2015 05:21:23 -0700
Subject: [PATCH 20/20] Updated version

---
 gradle.properties | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/gradle.properties b/gradle.properties
index cee8190f..96f073e3 100644
--- a/gradle.properties
+++ b/gradle.properties
@@ -1,4 +1,4 @@
-version=0.6.85-tm-pb26
+version=0.6.85-tm-pb26-2
 defaultScalaVersion=2.10.4
 targetScalaVersions=2.10.4
 crossBuild=false