diff --git a/build.gradle b/build.gradle
index e2c395b3..99b6af86 100644
--- a/build.gradle
+++ b/build.gradle
@@ -14,6 +14,9 @@ def getScalaSuffix(scalaVersion) {
}
subprojects {
+ apply plugin: 'maven'
+
+ group = 'com.linkedin.norbert'
// the cross built scala modules share the same source directories so we need to make their output directories unique
buildDir = "${rootProject.buildDir}/$name"
@@ -30,7 +33,7 @@ subprojects {
ext.externalDependency = [
'zookeeper':'org.apache.zookeeper:zookeeper:3.3.4',
- 'protobuf':'com.google.protobuf:protobuf-java:2.4.0a',
+ 'protobuf':'com.google.protobuf:protobuf-java:2.6.0',
'log4j':'log4j:log4j:1.2.17',
'netty':'io.netty:netty:3.7.0.Final',
'slf4jApi':'org.slf4j:slf4j-api:1.7.5',
diff --git a/cluster/src/main/java/com/linkedin/norbert/protos/NorbertExampleProtos.java b/cluster/src/main/java/com/linkedin/norbert/protos/NorbertExampleProtos.java
index 79603582..db503891 100644
--- a/cluster/src/main/java/com/linkedin/norbert/protos/NorbertExampleProtos.java
+++ b/cluster/src/main/java/com/linkedin/norbert/protos/NorbertExampleProtos.java
@@ -1,5 +1,5 @@
// Generated by the protocol buffer compiler. DO NOT EDIT!
-// source: norbert_example.proto
+// source: protobuf/norbert_example.proto
package com.linkedin.norbert.protos;
@@ -10,50 +10,129 @@ public static void registerAllExtensions(
}
public interface PingOrBuilder
extends com.google.protobuf.MessageOrBuilder {
-
+
// required int64 timestamp = 1;
+ /**
+ * required int64 timestamp = 1;
+ */
boolean hasTimestamp();
+ /**
+ * required int64 timestamp = 1;
+ */
long getTimestamp();
}
+ /**
+ * Protobuf type {@code norbert.example.Ping}
+ */
public static final class Ping extends
com.google.protobuf.GeneratedMessage
implements PingOrBuilder {
// Use Ping.newBuilder() to construct.
- private Ping(Builder builder) {
+ private Ping(com.google.protobuf.GeneratedMessage.Builder> builder) {
super(builder);
+ this.unknownFields = builder.getUnknownFields();
}
- private Ping(boolean noInit) {}
-
+ private Ping(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
private static final Ping defaultInstance;
public static Ping getDefaultInstance() {
return defaultInstance;
}
-
+
public Ping getDefaultInstanceForType() {
return defaultInstance;
}
-
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private Ping(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ timestamp_ = input.readInt64();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return com.linkedin.norbert.protos.NorbertExampleProtos.internal_static_norbert_example_Ping_descriptor;
}
-
+
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return com.linkedin.norbert.protos.NorbertExampleProtos.internal_static_norbert_example_Ping_fieldAccessorTable;
+ return com.linkedin.norbert.protos.NorbertExampleProtos.internal_static_norbert_example_Ping_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.linkedin.norbert.protos.NorbertExampleProtos.Ping.class, com.linkedin.norbert.protos.NorbertExampleProtos.Ping.Builder.class);
}
-
+
+ public static com.google.protobuf.Parser PARSER =
+ new com.google.protobuf.AbstractParser() {
+ public Ping parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new Ping(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser getParserForType() {
+ return PARSER;
+ }
+
private int bitField0_;
// required int64 timestamp = 1;
public static final int TIMESTAMP_FIELD_NUMBER = 1;
private long timestamp_;
+ /**
+ * required int64 timestamp = 1;
+ */
public boolean hasTimestamp() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
+ /**
+ * required int64 timestamp = 1;
+ */
public long getTimestamp() {
return timestamp_;
}
-
+
private void initFields() {
timestamp_ = 0L;
}
@@ -61,7 +140,7 @@ private void initFields() {
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
-
+
if (!hasTimestamp()) {
memoizedIsInitialized = 0;
return false;
@@ -69,7 +148,7 @@ public final boolean isInitialized() {
memoizedIsInitialized = 1;
return true;
}
-
+
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
@@ -78,12 +157,12 @@ public void writeTo(com.google.protobuf.CodedOutputStream output)
}
getUnknownFields().writeTo(output);
}
-
+
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
-
+
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
@@ -93,92 +172,83 @@ public int getSerializedSize() {
memoizedSerializedSize = size;
return size;
}
-
+
+ private static final long serialVersionUID = 0L;
@java.lang.Override
- protected Object writeReplace() throws java.io.ObjectStreamException {
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
return super.writeReplace();
}
-
+
public static com.linkedin.norbert.protos.NorbertExampleProtos.Ping parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data).buildParsed();
+ return PARSER.parseFrom(data);
}
public static com.linkedin.norbert.protos.NorbertExampleProtos.Ping parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(data, extensionRegistry);
}
public static com.linkedin.norbert.protos.NorbertExampleProtos.Ping parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data).buildParsed();
+ return PARSER.parseFrom(data);
}
public static com.linkedin.norbert.protos.NorbertExampleProtos.Ping parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(data, extensionRegistry);
}
public static com.linkedin.norbert.protos.NorbertExampleProtos.Ping parseFrom(java.io.InputStream input)
throws java.io.IOException {
- return newBuilder().mergeFrom(input).buildParsed();
+ return PARSER.parseFrom(input);
}
public static com.linkedin.norbert.protos.NorbertExampleProtos.Ping parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- return newBuilder().mergeFrom(input, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(input, extensionRegistry);
}
public static com.linkedin.norbert.protos.NorbertExampleProtos.Ping parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
- Builder builder = newBuilder();
- if (builder.mergeDelimitedFrom(input)) {
- return builder.buildParsed();
- } else {
- return null;
- }
+ return PARSER.parseDelimitedFrom(input);
}
public static com.linkedin.norbert.protos.NorbertExampleProtos.Ping parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- Builder builder = newBuilder();
- if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
- return builder.buildParsed();
- } else {
- return null;
- }
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static com.linkedin.norbert.protos.NorbertExampleProtos.Ping parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
- return newBuilder().mergeFrom(input).buildParsed();
+ return PARSER.parseFrom(input);
}
public static com.linkedin.norbert.protos.NorbertExampleProtos.Ping parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- return newBuilder().mergeFrom(input, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(input, extensionRegistry);
}
-
+
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(com.linkedin.norbert.protos.NorbertExampleProtos.Ping prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
-
+
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
+ /**
+ * Protobuf type {@code norbert.example.Ping}
+ */
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements com.linkedin.norbert.protos.NorbertExampleProtos.PingOrBuilder {
@@ -186,18 +256,21 @@ public static final class Builder extends
getDescriptor() {
return com.linkedin.norbert.protos.NorbertExampleProtos.internal_static_norbert_example_Ping_descriptor;
}
-
+
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return com.linkedin.norbert.protos.NorbertExampleProtos.internal_static_norbert_example_Ping_fieldAccessorTable;
+ return com.linkedin.norbert.protos.NorbertExampleProtos.internal_static_norbert_example_Ping_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.linkedin.norbert.protos.NorbertExampleProtos.Ping.class, com.linkedin.norbert.protos.NorbertExampleProtos.Ping.Builder.class);
}
-
+
// Construct using com.linkedin.norbert.protos.NorbertExampleProtos.Ping.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
-
- private Builder(BuilderParent parent) {
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
@@ -208,27 +281,27 @@ private void maybeForceBuilderInitialization() {
private static Builder create() {
return new Builder();
}
-
+
public Builder clear() {
super.clear();
timestamp_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
-
+
public Builder clone() {
return create().mergeFrom(buildPartial());
}
-
+
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
- return com.linkedin.norbert.protos.NorbertExampleProtos.Ping.getDescriptor();
+ return com.linkedin.norbert.protos.NorbertExampleProtos.internal_static_norbert_example_Ping_descriptor;
}
-
+
public com.linkedin.norbert.protos.NorbertExampleProtos.Ping getDefaultInstanceForType() {
return com.linkedin.norbert.protos.NorbertExampleProtos.Ping.getDefaultInstance();
}
-
+
public com.linkedin.norbert.protos.NorbertExampleProtos.Ping build() {
com.linkedin.norbert.protos.NorbertExampleProtos.Ping result = buildPartial();
if (!result.isInitialized()) {
@@ -236,17 +309,7 @@ public com.linkedin.norbert.protos.NorbertExampleProtos.Ping build() {
}
return result;
}
-
- private com.linkedin.norbert.protos.NorbertExampleProtos.Ping buildParsed()
- throws com.google.protobuf.InvalidProtocolBufferException {
- com.linkedin.norbert.protos.NorbertExampleProtos.Ping result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(
- result).asInvalidProtocolBufferException();
- }
- return result;
- }
-
+
public com.linkedin.norbert.protos.NorbertExampleProtos.Ping buildPartial() {
com.linkedin.norbert.protos.NorbertExampleProtos.Ping result = new com.linkedin.norbert.protos.NorbertExampleProtos.Ping(this);
int from_bitField0_ = bitField0_;
@@ -259,7 +322,7 @@ public com.linkedin.norbert.protos.NorbertExampleProtos.Ping buildPartial() {
onBuilt();
return result;
}
-
+
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof com.linkedin.norbert.protos.NorbertExampleProtos.Ping) {
return mergeFrom((com.linkedin.norbert.protos.NorbertExampleProtos.Ping)other);
@@ -268,7 +331,7 @@ public Builder mergeFrom(com.google.protobuf.Message other) {
return this;
}
}
-
+
public Builder mergeFrom(com.linkedin.norbert.protos.NorbertExampleProtos.Ping other) {
if (other == com.linkedin.norbert.protos.NorbertExampleProtos.Ping.getDefaultInstance()) return this;
if (other.hasTimestamp()) {
@@ -277,7 +340,7 @@ public Builder mergeFrom(com.linkedin.norbert.protos.NorbertExampleProtos.Ping o
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
-
+
public final boolean isInitialized() {
if (!hasTimestamp()) {
@@ -285,119 +348,195 @@ public final boolean isInitialized() {
}
return true;
}
-
+
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder(
- this.getUnknownFields());
- while (true) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- this.setUnknownFields(unknownFields.build());
- onChanged();
- return this;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- this.setUnknownFields(unknownFields.build());
- onChanged();
- return this;
- }
- break;
- }
- case 8: {
- bitField0_ |= 0x00000001;
- timestamp_ = input.readInt64();
- break;
- }
+ com.linkedin.norbert.protos.NorbertExampleProtos.Ping parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (com.linkedin.norbert.protos.NorbertExampleProtos.Ping) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
}
}
+ return this;
}
-
private int bitField0_;
-
+
// required int64 timestamp = 1;
private long timestamp_ ;
+ /**
+ * required int64 timestamp = 1;
+ */
public boolean hasTimestamp() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
+ /**
+ * required int64 timestamp = 1;
+ */
public long getTimestamp() {
return timestamp_;
}
+ /**
+ * required int64 timestamp = 1;
+ */
public Builder setTimestamp(long value) {
bitField0_ |= 0x00000001;
timestamp_ = value;
onChanged();
return this;
}
+ /**
+ * required int64 timestamp = 1;
+ */
public Builder clearTimestamp() {
bitField0_ = (bitField0_ & ~0x00000001);
timestamp_ = 0L;
onChanged();
return this;
}
-
+
// @@protoc_insertion_point(builder_scope:norbert.example.Ping)
}
-
+
static {
defaultInstance = new Ping(true);
defaultInstance.initFields();
}
-
+
// @@protoc_insertion_point(class_scope:norbert.example.Ping)
}
-
+
public interface PingResponseOrBuilder
extends com.google.protobuf.MessageOrBuilder {
-
+
// required int64 timestamp = 1;
+ /**
+ * required int64 timestamp = 1;
+ */
boolean hasTimestamp();
+ /**
+ * required int64 timestamp = 1;
+ */
long getTimestamp();
}
+ /**
+ * Protobuf type {@code norbert.example.PingResponse}
+ */
public static final class PingResponse extends
com.google.protobuf.GeneratedMessage
implements PingResponseOrBuilder {
// Use PingResponse.newBuilder() to construct.
- private PingResponse(Builder builder) {
+ private PingResponse(com.google.protobuf.GeneratedMessage.Builder> builder) {
super(builder);
+ this.unknownFields = builder.getUnknownFields();
}
- private PingResponse(boolean noInit) {}
-
+ private PingResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
private static final PingResponse defaultInstance;
public static PingResponse getDefaultInstance() {
return defaultInstance;
}
-
+
public PingResponse getDefaultInstanceForType() {
return defaultInstance;
}
-
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private PingResponse(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ timestamp_ = input.readInt64();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return com.linkedin.norbert.protos.NorbertExampleProtos.internal_static_norbert_example_PingResponse_descriptor;
}
-
+
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return com.linkedin.norbert.protos.NorbertExampleProtos.internal_static_norbert_example_PingResponse_fieldAccessorTable;
+ return com.linkedin.norbert.protos.NorbertExampleProtos.internal_static_norbert_example_PingResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse.class, com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse.Builder.class);
}
-
+
+ public static com.google.protobuf.Parser PARSER =
+ new com.google.protobuf.AbstractParser() {
+ public PingResponse parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new PingResponse(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser getParserForType() {
+ return PARSER;
+ }
+
private int bitField0_;
// required int64 timestamp = 1;
public static final int TIMESTAMP_FIELD_NUMBER = 1;
private long timestamp_;
+ /**
+ * required int64 timestamp = 1;
+ */
public boolean hasTimestamp() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
+ /**
+ * required int64 timestamp = 1;
+ */
public long getTimestamp() {
return timestamp_;
}
-
+
private void initFields() {
timestamp_ = 0L;
}
@@ -405,7 +544,7 @@ private void initFields() {
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
-
+
if (!hasTimestamp()) {
memoizedIsInitialized = 0;
return false;
@@ -413,7 +552,7 @@ public final boolean isInitialized() {
memoizedIsInitialized = 1;
return true;
}
-
+
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
@@ -422,12 +561,12 @@ public void writeTo(com.google.protobuf.CodedOutputStream output)
}
getUnknownFields().writeTo(output);
}
-
+
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
-
+
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
@@ -437,92 +576,83 @@ public int getSerializedSize() {
memoizedSerializedSize = size;
return size;
}
-
+
+ private static final long serialVersionUID = 0L;
@java.lang.Override
- protected Object writeReplace() throws java.io.ObjectStreamException {
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
return super.writeReplace();
}
-
+
public static com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data).buildParsed();
+ return PARSER.parseFrom(data);
}
public static com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(data, extensionRegistry);
}
public static com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data).buildParsed();
+ return PARSER.parseFrom(data);
}
public static com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(data, extensionRegistry);
}
public static com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse parseFrom(java.io.InputStream input)
throws java.io.IOException {
- return newBuilder().mergeFrom(input).buildParsed();
+ return PARSER.parseFrom(input);
}
public static com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- return newBuilder().mergeFrom(input, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(input, extensionRegistry);
}
public static com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
- Builder builder = newBuilder();
- if (builder.mergeDelimitedFrom(input)) {
- return builder.buildParsed();
- } else {
- return null;
- }
+ return PARSER.parseDelimitedFrom(input);
}
public static com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- Builder builder = newBuilder();
- if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
- return builder.buildParsed();
- } else {
- return null;
- }
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
- return newBuilder().mergeFrom(input).buildParsed();
+ return PARSER.parseFrom(input);
}
public static com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- return newBuilder().mergeFrom(input, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(input, extensionRegistry);
}
-
+
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
-
+
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
+ /**
+ * Protobuf type {@code norbert.example.PingResponse}
+ */
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements com.linkedin.norbert.protos.NorbertExampleProtos.PingResponseOrBuilder {
@@ -530,18 +660,21 @@ public static final class Builder extends
getDescriptor() {
return com.linkedin.norbert.protos.NorbertExampleProtos.internal_static_norbert_example_PingResponse_descriptor;
}
-
+
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return com.linkedin.norbert.protos.NorbertExampleProtos.internal_static_norbert_example_PingResponse_fieldAccessorTable;
+ return com.linkedin.norbert.protos.NorbertExampleProtos.internal_static_norbert_example_PingResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse.class, com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse.Builder.class);
}
-
+
// Construct using com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
-
- private Builder(BuilderParent parent) {
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
@@ -552,27 +685,27 @@ private void maybeForceBuilderInitialization() {
private static Builder create() {
return new Builder();
}
-
+
public Builder clear() {
super.clear();
timestamp_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
-
+
public Builder clone() {
return create().mergeFrom(buildPartial());
}
-
+
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
- return com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse.getDescriptor();
+ return com.linkedin.norbert.protos.NorbertExampleProtos.internal_static_norbert_example_PingResponse_descriptor;
}
-
+
public com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse getDefaultInstanceForType() {
return com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse.getDefaultInstance();
}
-
+
public com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse build() {
com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse result = buildPartial();
if (!result.isInitialized()) {
@@ -580,17 +713,7 @@ public com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse build() {
}
return result;
}
-
- private com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse buildParsed()
- throws com.google.protobuf.InvalidProtocolBufferException {
- com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(
- result).asInvalidProtocolBufferException();
- }
- return result;
- }
-
+
public com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse buildPartial() {
com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse result = new com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse(this);
int from_bitField0_ = bitField0_;
@@ -603,7 +726,7 @@ public com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse buildPartia
onBuilt();
return result;
}
-
+
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse) {
return mergeFrom((com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse)other);
@@ -612,7 +735,7 @@ public Builder mergeFrom(com.google.protobuf.Message other) {
return this;
}
}
-
+
public Builder mergeFrom(com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse other) {
if (other == com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse.getDefaultInstance()) return this;
if (other.hasTimestamp()) {
@@ -621,7 +744,7 @@ public Builder mergeFrom(com.linkedin.norbert.protos.NorbertExampleProtos.PingRe
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
-
+
public final boolean isInitialized() {
if (!hasTimestamp()) {
@@ -629,73 +752,70 @@ public final boolean isInitialized() {
}
return true;
}
-
+
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder(
- this.getUnknownFields());
- while (true) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- this.setUnknownFields(unknownFields.build());
- onChanged();
- return this;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- this.setUnknownFields(unknownFields.build());
- onChanged();
- return this;
- }
- break;
- }
- case 8: {
- bitField0_ |= 0x00000001;
- timestamp_ = input.readInt64();
- break;
- }
+ com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
}
}
+ return this;
}
-
private int bitField0_;
-
+
// required int64 timestamp = 1;
private long timestamp_ ;
+ /**
+ * required int64 timestamp = 1;
+ */
public boolean hasTimestamp() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
+ /**
+ * required int64 timestamp = 1;
+ */
public long getTimestamp() {
return timestamp_;
}
+ /**
+ * required int64 timestamp = 1;
+ */
public Builder setTimestamp(long value) {
bitField0_ |= 0x00000001;
timestamp_ = value;
onChanged();
return this;
}
+ /**
+ * required int64 timestamp = 1;
+ */
public Builder clearTimestamp() {
bitField0_ = (bitField0_ & ~0x00000001);
timestamp_ = 0L;
onChanged();
return this;
}
-
+
// @@protoc_insertion_point(builder_scope:norbert.example.PingResponse)
}
-
+
static {
defaultInstance = new PingResponse(true);
defaultInstance.initFields();
}
-
+
// @@protoc_insertion_point(class_scope:norbert.example.PingResponse)
}
-
+
private static com.google.protobuf.Descriptors.Descriptor
internal_static_norbert_example_Ping_descriptor;
private static
@@ -706,7 +826,7 @@ public Builder clearTimestamp() {
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_norbert_example_PingResponse_fieldAccessorTable;
-
+
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
return descriptor;
@@ -715,10 +835,11 @@ public Builder clearTimestamp() {
descriptor;
static {
java.lang.String[] descriptorData = {
- "\n\025norbert_example.proto\022\017norbert.example" +
- "\"\031\n\004Ping\022\021\n\ttimestamp\030\001 \002(\003\"!\n\014PingRespo" +
- "nse\022\021\n\ttimestamp\030\001 \002(\003B5\n\033com.linkedin.n" +
- "orbert.protosB\024NorbertExampleProtosH\001"
+ "\n\036protobuf/norbert_example.proto\022\017norber" +
+ "t.example\"\031\n\004Ping\022\021\n\ttimestamp\030\001 \002(\003\"!\n\014" +
+ "PingResponse\022\021\n\ttimestamp\030\001 \002(\003B5\n\033com.l" +
+ "inkedin.norbert.protosB\024NorbertExamplePr" +
+ "otosH\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -730,17 +851,13 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
internal_static_norbert_example_Ping_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_norbert_example_Ping_descriptor,
- new java.lang.String[] { "Timestamp", },
- com.linkedin.norbert.protos.NorbertExampleProtos.Ping.class,
- com.linkedin.norbert.protos.NorbertExampleProtos.Ping.Builder.class);
+ new java.lang.String[] { "Timestamp", });
internal_static_norbert_example_PingResponse_descriptor =
getDescriptor().getMessageTypes().get(1);
internal_static_norbert_example_PingResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_norbert_example_PingResponse_descriptor,
- new java.lang.String[] { "Timestamp", },
- com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse.class,
- com.linkedin.norbert.protos.NorbertExampleProtos.PingResponse.Builder.class);
+ new java.lang.String[] { "Timestamp", });
return null;
}
};
@@ -749,6 +866,6 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
new com.google.protobuf.Descriptors.FileDescriptor[] {
}, assigner);
}
-
+
// @@protoc_insertion_point(outer_class_scope)
}
diff --git a/cluster/src/main/java/com/linkedin/norbert/protos/NorbertProtos.java b/cluster/src/main/java/com/linkedin/norbert/protos/NorbertProtos.java
index e3500a80..6f204cff 100644
--- a/cluster/src/main/java/com/linkedin/norbert/protos/NorbertProtos.java
+++ b/cluster/src/main/java/com/linkedin/norbert/protos/NorbertProtos.java
@@ -1,5 +1,5 @@
// Generated by the protocol buffer compiler. DO NOT EDIT!
-// source: norbert.proto
+// source: protobuf/norbert.proto
package com.linkedin.norbert.protos;
@@ -10,83 +10,274 @@ public static void registerAllExtensions(
}
public interface NorbertMessageOrBuilder
extends com.google.protobuf.MessageOrBuilder {
-
+
// required sfixed64 request_id_msb = 1;
+ /**
+ * required sfixed64 request_id_msb = 1;
+ */
boolean hasRequestIdMsb();
+ /**
+ * required sfixed64 request_id_msb = 1;
+ */
long getRequestIdMsb();
-
+
// required sfixed64 request_id_lsb = 2;
+ /**
+ * required sfixed64 request_id_lsb = 2;
+ */
boolean hasRequestIdLsb();
+ /**
+ * required sfixed64 request_id_lsb = 2;
+ */
long getRequestIdLsb();
-
+
// optional .norbert.NorbertMessage.Status status = 10 [default = OK];
+ /**
+ * optional .norbert.NorbertMessage.Status status = 10 [default = OK];
+ */
boolean hasStatus();
+ /**
+ * optional .norbert.NorbertMessage.Status status = 10 [default = OK];
+ */
com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Status getStatus();
-
+
// required string message_name = 11;
+ /**
+ * required string message_name = 11;
+ */
boolean hasMessageName();
- String getMessageName();
-
+ /**
+ * required string message_name = 11;
+ */
+ java.lang.String getMessageName();
+ /**
+ * required string message_name = 11;
+ */
+ com.google.protobuf.ByteString
+ getMessageNameBytes();
+
// optional bytes message = 12;
+ /**
+ * optional bytes message = 12;
+ */
boolean hasMessage();
+ /**
+ * optional bytes message = 12;
+ */
com.google.protobuf.ByteString getMessage();
-
+
// optional string error_message = 13;
+ /**
+ * optional string error_message = 13;
+ */
boolean hasErrorMessage();
- String getErrorMessage();
-
+ /**
+ * optional string error_message = 13;
+ */
+ java.lang.String getErrorMessage();
+ /**
+ * optional string error_message = 13;
+ */
+ com.google.protobuf.ByteString
+ getErrorMessageBytes();
+
// repeated .norbert.NorbertMessage.Header header = 14;
+ /**
+ * repeated .norbert.NorbertMessage.Header header = 14;
+ */
java.util.List
getHeaderList();
+ /**
+ * repeated .norbert.NorbertMessage.Header header = 14;
+ */
com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header getHeader(int index);
+ /**
+ * repeated .norbert.NorbertMessage.Header header = 14;
+ */
int getHeaderCount();
+ /**
+ * repeated .norbert.NorbertMessage.Header header = 14;
+ */
java.util.List extends com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.HeaderOrBuilder>
getHeaderOrBuilderList();
+ /**
+ * repeated .norbert.NorbertMessage.Header header = 14;
+ */
com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.HeaderOrBuilder getHeaderOrBuilder(
int index);
}
+ /**
+ * Protobuf type {@code norbert.NorbertMessage}
+ */
public static final class NorbertMessage extends
com.google.protobuf.GeneratedMessage
implements NorbertMessageOrBuilder {
// Use NorbertMessage.newBuilder() to construct.
- private NorbertMessage(Builder builder) {
+ private NorbertMessage(com.google.protobuf.GeneratedMessage.Builder> builder) {
super(builder);
+ this.unknownFields = builder.getUnknownFields();
}
- private NorbertMessage(boolean noInit) {}
-
+ private NorbertMessage(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
private static final NorbertMessage defaultInstance;
public static NorbertMessage getDefaultInstance() {
return defaultInstance;
}
-
+
public NorbertMessage getDefaultInstanceForType() {
return defaultInstance;
}
-
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private NorbertMessage(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 9: {
+ bitField0_ |= 0x00000001;
+ requestIdMsb_ = input.readSFixed64();
+ break;
+ }
+ case 17: {
+ bitField0_ |= 0x00000002;
+ requestIdLsb_ = input.readSFixed64();
+ break;
+ }
+ case 80: {
+ int rawValue = input.readEnum();
+ com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Status value = com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Status.valueOf(rawValue);
+ if (value == null) {
+ unknownFields.mergeVarintField(10, rawValue);
+ } else {
+ bitField0_ |= 0x00000004;
+ status_ = value;
+ }
+ break;
+ }
+ case 90: {
+ bitField0_ |= 0x00000008;
+ messageName_ = input.readBytes();
+ break;
+ }
+ case 98: {
+ bitField0_ |= 0x00000010;
+ message_ = input.readBytes();
+ break;
+ }
+ case 106: {
+ bitField0_ |= 0x00000020;
+ errorMessage_ = input.readBytes();
+ break;
+ }
+ case 114: {
+ if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
+ header_ = new java.util.ArrayList();
+ mutable_bitField0_ |= 0x00000040;
+ }
+ header_.add(input.readMessage(com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
+ header_ = java.util.Collections.unmodifiableList(header_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_NorbertMessage_descriptor;
}
-
+
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_NorbertMessage_fieldAccessorTable;
+ return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_NorbertMessage_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.class, com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser PARSER =
+ new com.google.protobuf.AbstractParser() {
+ public NorbertMessage parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new NorbertMessage(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser getParserForType() {
+ return PARSER;
}
-
+
+ /**
+ * Protobuf enum {@code norbert.NorbertMessage.Status}
+ */
public enum Status
implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * OK = 0;
+ */
OK(0, 0),
+ /**
+ * ERROR = 1;
+ */
ERROR(1, 1),
+ /**
+ * HEAVYLOAD = 2;
+ */
HEAVYLOAD(2, 2),
;
-
+
+ /**
+ * OK = 0;
+ */
public static final int OK_VALUE = 0;
+ /**
+ * ERROR = 1;
+ */
public static final int ERROR_VALUE = 1;
+ /**
+ * HEAVYLOAD = 2;
+ */
public static final int HEAVYLOAD_VALUE = 2;
-
-
+
+
public final int getNumber() { return value; }
-
+
public static Status valueOf(int value) {
switch (value) {
case 0: return OK;
@@ -95,7 +286,7 @@ public static Status valueOf(int value) {
default: return null;
}
}
-
+
public static com.google.protobuf.Internal.EnumLiteMap
internalGetValueMap() {
return internalValueMap;
@@ -107,7 +298,7 @@ public Status findValueByNumber(int number) {
return Status.valueOf(number);
}
};
-
+
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
@@ -120,11 +311,9 @@ public Status findValueByNumber(int number) {
getDescriptor() {
return com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.getDescriptor().getEnumTypes().get(0);
}
-
- private static final Status[] VALUES = {
- OK, ERROR, HEAVYLOAD,
- };
-
+
+ private static final Status[] VALUES = values();
+
public static Status valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
@@ -133,122 +322,238 @@ public static Status valueOf(
}
return VALUES[desc.getIndex()];
}
-
+
private final int index;
private final int value;
-
+
private Status(int index, int value) {
this.index = index;
this.value = value;
}
-
+
// @@protoc_insertion_point(enum_scope:norbert.NorbertMessage.Status)
}
-
+
public interface HeaderOrBuilder
extends com.google.protobuf.MessageOrBuilder {
-
+
// required string key = 1;
+ /**
+ * required string key = 1;
+ */
boolean hasKey();
- String getKey();
-
+ /**
+ * required string key = 1;
+ */
+ java.lang.String getKey();
+ /**
+ * required string key = 1;
+ */
+ com.google.protobuf.ByteString
+ getKeyBytes();
+
// optional string value = 2;
+ /**
+ * optional string value = 2;
+ */
boolean hasValue();
- String getValue();
- }
+ /**
+ * optional string value = 2;
+ */
+ java.lang.String getValue();
+ /**
+ * optional string value = 2;
+ */
+ com.google.protobuf.ByteString
+ getValueBytes();
+ }
+ /**
+ * Protobuf type {@code norbert.NorbertMessage.Header}
+ */
public static final class Header extends
com.google.protobuf.GeneratedMessage
implements HeaderOrBuilder {
// Use Header.newBuilder() to construct.
- private Header(Builder builder) {
+ private Header(com.google.protobuf.GeneratedMessage.Builder> builder) {
super(builder);
+ this.unknownFields = builder.getUnknownFields();
}
- private Header(boolean noInit) {}
-
+ private Header(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
private static final Header defaultInstance;
public static Header getDefaultInstance() {
return defaultInstance;
}
-
+
public Header getDefaultInstanceForType() {
return defaultInstance;
}
-
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private Header(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ key_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ value_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_NorbertMessage_Header_descriptor;
}
-
+
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_NorbertMessage_Header_fieldAccessorTable;
+ return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_NorbertMessage_Header_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.class, com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser PARSER =
+ new com.google.protobuf.AbstractParser() {
+ public Header parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new Header(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser getParserForType() {
+ return PARSER;
}
-
+
private int bitField0_;
// required string key = 1;
public static final int KEY_FIELD_NUMBER = 1;
private java.lang.Object key_;
+ /**
+ * required string key = 1;
+ */
public boolean hasKey() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
- public String getKey() {
+ /**
+ * required string key = 1;
+ */
+ public java.lang.String getKey() {
java.lang.Object ref = key_;
- if (ref instanceof String) {
- return (String) ref;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
- String s = bs.toStringUtf8();
- if (com.google.protobuf.Internal.isValidUtf8(bs)) {
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
key_ = s;
}
return s;
}
}
- private com.google.protobuf.ByteString getKeyBytes() {
+ /**
+ * required string key = 1;
+ */
+ public com.google.protobuf.ByteString
+ getKeyBytes() {
java.lang.Object ref = key_;
- if (ref instanceof String) {
+ if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8((String) ref);
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
key_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
-
+
// optional string value = 2;
public static final int VALUE_FIELD_NUMBER = 2;
private java.lang.Object value_;
+ /**
+ * optional string value = 2;
+ */
public boolean hasValue() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
- public String getValue() {
+ /**
+ * optional string value = 2;
+ */
+ public java.lang.String getValue() {
java.lang.Object ref = value_;
- if (ref instanceof String) {
- return (String) ref;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
- String s = bs.toStringUtf8();
- if (com.google.protobuf.Internal.isValidUtf8(bs)) {
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
value_ = s;
}
return s;
}
}
- private com.google.protobuf.ByteString getValueBytes() {
+ /**
+ * optional string value = 2;
+ */
+ public com.google.protobuf.ByteString
+ getValueBytes() {
java.lang.Object ref = value_;
- if (ref instanceof String) {
+ if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8((String) ref);
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
value_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
-
+
private void initFields() {
key_ = "";
value_ = "";
@@ -257,7 +562,7 @@ private void initFields() {
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
-
+
if (!hasKey()) {
memoizedIsInitialized = 0;
return false;
@@ -265,7 +570,7 @@ public final boolean isInitialized() {
memoizedIsInitialized = 1;
return true;
}
-
+
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
@@ -277,12 +582,12 @@ public void writeTo(com.google.protobuf.CodedOutputStream output)
}
getUnknownFields().writeTo(output);
}
-
+
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
-
+
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
@@ -296,94 +601,83 @@ public int getSerializedSize() {
memoizedSerializedSize = size;
return size;
}
-
+
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
-
+
public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data).buildParsed();
+ return PARSER.parseFrom(data);
}
public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(data, extensionRegistry);
}
public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data).buildParsed();
+ return PARSER.parseFrom(data);
}
public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(data, extensionRegistry);
}
public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header parseFrom(java.io.InputStream input)
throws java.io.IOException {
- return newBuilder().mergeFrom(input).buildParsed();
+ return PARSER.parseFrom(input);
}
public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- return newBuilder().mergeFrom(input, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(input, extensionRegistry);
}
public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
- Builder builder = newBuilder();
- if (builder.mergeDelimitedFrom(input)) {
- return builder.buildParsed();
- } else {
- return null;
- }
+ return PARSER.parseDelimitedFrom(input);
}
public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- Builder builder = newBuilder();
- if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
- return builder.buildParsed();
- } else {
- return null;
- }
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
- return newBuilder().mergeFrom(input).buildParsed();
+ return PARSER.parseFrom(input);
}
public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- return newBuilder().mergeFrom(input, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(input, extensionRegistry);
}
-
+
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
-
+
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
+ /**
+ * Protobuf type {@code norbert.NorbertMessage.Header}
+ */
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.HeaderOrBuilder {
@@ -391,18 +685,21 @@ public static final class Builder extends
getDescriptor() {
return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_NorbertMessage_Header_descriptor;
}
-
+
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_NorbertMessage_Header_fieldAccessorTable;
+ return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_NorbertMessage_Header_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.class, com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.Builder.class);
}
-
+
// Construct using com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
-
- private Builder(BuilderParent parent) {
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
@@ -413,7 +710,7 @@ private void maybeForceBuilderInitialization() {
private static Builder create() {
return new Builder();
}
-
+
public Builder clear() {
super.clear();
key_ = "";
@@ -422,20 +719,20 @@ public Builder clear() {
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
-
+
public Builder clone() {
return create().mergeFrom(buildPartial());
}
-
+
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
- return com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.getDescriptor();
+ return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_NorbertMessage_Header_descriptor;
}
-
+
public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header getDefaultInstanceForType() {
return com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.getDefaultInstance();
}
-
+
public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header build() {
com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header result = buildPartial();
if (!result.isInitialized()) {
@@ -443,17 +740,7 @@ public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header build() {
}
return result;
}
-
- private com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header buildParsed()
- throws com.google.protobuf.InvalidProtocolBufferException {
- com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(
- result).asInvalidProtocolBufferException();
- }
- return result;
- }
-
+
public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header buildPartial() {
com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header result = new com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header(this);
int from_bitField0_ = bitField0_;
@@ -470,7 +757,7 @@ public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header buildPart
onBuilt();
return result;
}
-
+
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header) {
return mergeFrom((com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header)other);
@@ -479,19 +766,23 @@ public Builder mergeFrom(com.google.protobuf.Message other) {
return this;
}
}
-
+
public Builder mergeFrom(com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header other) {
if (other == com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.getDefaultInstance()) return this;
if (other.hasKey()) {
- setKey(other.getKey());
+ bitField0_ |= 0x00000001;
+ key_ = other.key_;
+ onChanged();
}
if (other.hasValue()) {
- setValue(other.getValue());
+ bitField0_ |= 0x00000002;
+ value_ = other.value_;
+ onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
-
+
public final boolean isInitialized() {
if (!hasKey()) {
@@ -499,62 +790,69 @@ public final boolean isInitialized() {
}
return true;
}
-
+
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder(
- this.getUnknownFields());
- while (true) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- this.setUnknownFields(unknownFields.build());
- onChanged();
- return this;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- this.setUnknownFields(unknownFields.build());
- onChanged();
- return this;
- }
- break;
- }
- case 10: {
- bitField0_ |= 0x00000001;
- key_ = input.readBytes();
- break;
- }
- case 18: {
- bitField0_ |= 0x00000002;
- value_ = input.readBytes();
- break;
- }
+ com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
}
}
+ return this;
}
-
private int bitField0_;
-
+
// required string key = 1;
private java.lang.Object key_ = "";
+ /**
+ * required string key = 1;
+ */
public boolean hasKey() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
- public String getKey() {
+ /**
+ * required string key = 1;
+ */
+ public java.lang.String getKey() {
java.lang.Object ref = key_;
- if (!(ref instanceof String)) {
- String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
key_ = s;
return s;
} else {
- return (String) ref;
+ return (java.lang.String) ref;
}
}
- public Builder setKey(String value) {
+ /**
+ * required string key = 1;
+ */
+ public com.google.protobuf.ByteString
+ getKeyBytes() {
+ java.lang.Object ref = key_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ key_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string key = 1;
+ */
+ public Builder setKey(
+ java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
@@ -563,34 +861,72 @@ public Builder setKey(String value) {
onChanged();
return this;
}
+ /**
+ * required string key = 1;
+ */
public Builder clearKey() {
bitField0_ = (bitField0_ & ~0x00000001);
key_ = getDefaultInstance().getKey();
onChanged();
return this;
}
- void setKey(com.google.protobuf.ByteString value) {
- bitField0_ |= 0x00000001;
+ /**
+ * required string key = 1;
+ */
+ public Builder setKeyBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
key_ = value;
onChanged();
+ return this;
}
-
+
// optional string value = 2;
private java.lang.Object value_ = "";
+ /**
+ * optional string value = 2;
+ */
public boolean hasValue() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
- public String getValue() {
+ /**
+ * optional string value = 2;
+ */
+ public java.lang.String getValue() {
java.lang.Object ref = value_;
- if (!(ref instanceof String)) {
- String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
value_ = s;
return s;
} else {
- return (String) ref;
+ return (java.lang.String) ref;
}
}
- public Builder setValue(String value) {
+ /**
+ * optional string value = 2;
+ */
+ public com.google.protobuf.ByteString
+ getValueBytes() {
+ java.lang.Object ref = value_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ value_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string value = 2;
+ */
+ public Builder setValue(
+ java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
@@ -599,155 +935,227 @@ public Builder setValue(String value) {
onChanged();
return this;
}
+ /**
+ * optional string value = 2;
+ */
public Builder clearValue() {
bitField0_ = (bitField0_ & ~0x00000002);
value_ = getDefaultInstance().getValue();
onChanged();
return this;
}
- void setValue(com.google.protobuf.ByteString value) {
- bitField0_ |= 0x00000002;
+ /**
+ * optional string value = 2;
+ */
+ public Builder setValueBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
value_ = value;
onChanged();
+ return this;
}
-
+
// @@protoc_insertion_point(builder_scope:norbert.NorbertMessage.Header)
}
-
+
static {
defaultInstance = new Header(true);
defaultInstance.initFields();
}
-
+
// @@protoc_insertion_point(class_scope:norbert.NorbertMessage.Header)
}
-
+
private int bitField0_;
// required sfixed64 request_id_msb = 1;
public static final int REQUEST_ID_MSB_FIELD_NUMBER = 1;
private long requestIdMsb_;
+ /**
+ * required sfixed64 request_id_msb = 1;
+ */
public boolean hasRequestIdMsb() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
+ /**
+ * required sfixed64 request_id_msb = 1;
+ */
public long getRequestIdMsb() {
return requestIdMsb_;
}
-
+
// required sfixed64 request_id_lsb = 2;
public static final int REQUEST_ID_LSB_FIELD_NUMBER = 2;
private long requestIdLsb_;
+ /**
+ * required sfixed64 request_id_lsb = 2;
+ */
public boolean hasRequestIdLsb() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
+ /**
+ * required sfixed64 request_id_lsb = 2;
+ */
public long getRequestIdLsb() {
return requestIdLsb_;
}
-
+
// optional .norbert.NorbertMessage.Status status = 10 [default = OK];
public static final int STATUS_FIELD_NUMBER = 10;
private com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Status status_;
+ /**
+ * optional .norbert.NorbertMessage.Status status = 10 [default = OK];
+ */
public boolean hasStatus() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
+ /**
+ * optional .norbert.NorbertMessage.Status status = 10 [default = OK];
+ */
public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Status getStatus() {
return status_;
}
-
+
// required string message_name = 11;
public static final int MESSAGE_NAME_FIELD_NUMBER = 11;
private java.lang.Object messageName_;
+ /**
+ * required string message_name = 11;
+ */
public boolean hasMessageName() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
- public String getMessageName() {
+ /**
+ * required string message_name = 11;
+ */
+ public java.lang.String getMessageName() {
java.lang.Object ref = messageName_;
- if (ref instanceof String) {
- return (String) ref;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
- String s = bs.toStringUtf8();
- if (com.google.protobuf.Internal.isValidUtf8(bs)) {
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
messageName_ = s;
}
return s;
}
}
- private com.google.protobuf.ByteString getMessageNameBytes() {
+ /**
+ * required string message_name = 11;
+ */
+ public com.google.protobuf.ByteString
+ getMessageNameBytes() {
java.lang.Object ref = messageName_;
- if (ref instanceof String) {
+ if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8((String) ref);
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
messageName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
-
+
// optional bytes message = 12;
public static final int MESSAGE_FIELD_NUMBER = 12;
private com.google.protobuf.ByteString message_;
+ /**
+ * optional bytes message = 12;
+ */
public boolean hasMessage() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
+ /**
+ * optional bytes message = 12;
+ */
public com.google.protobuf.ByteString getMessage() {
return message_;
}
-
+
// optional string error_message = 13;
public static final int ERROR_MESSAGE_FIELD_NUMBER = 13;
private java.lang.Object errorMessage_;
+ /**
+ * optional string error_message = 13;
+ */
public boolean hasErrorMessage() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
- public String getErrorMessage() {
+ /**
+ * optional string error_message = 13;
+ */
+ public java.lang.String getErrorMessage() {
java.lang.Object ref = errorMessage_;
- if (ref instanceof String) {
- return (String) ref;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
- String s = bs.toStringUtf8();
- if (com.google.protobuf.Internal.isValidUtf8(bs)) {
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
errorMessage_ = s;
}
return s;
}
}
- private com.google.protobuf.ByteString getErrorMessageBytes() {
+ /**
+ * optional string error_message = 13;
+ */
+ public com.google.protobuf.ByteString
+ getErrorMessageBytes() {
java.lang.Object ref = errorMessage_;
- if (ref instanceof String) {
+ if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8((String) ref);
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
errorMessage_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
-
+
// repeated .norbert.NorbertMessage.Header header = 14;
public static final int HEADER_FIELD_NUMBER = 14;
private java.util.List header_;
+ /**
+ * repeated .norbert.NorbertMessage.Header header = 14;
+ */
public java.util.List getHeaderList() {
return header_;
}
+ /**
+ * repeated .norbert.NorbertMessage.Header header = 14;
+ */
public java.util.List extends com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.HeaderOrBuilder>
getHeaderOrBuilderList() {
return header_;
}
+ /**
+ * repeated .norbert.NorbertMessage.Header header = 14;
+ */
public int getHeaderCount() {
return header_.size();
}
+ /**
+ * repeated .norbert.NorbertMessage.Header header = 14;
+ */
public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header getHeader(int index) {
return header_.get(index);
}
+ /**
+ * repeated .norbert.NorbertMessage.Header header = 14;
+ */
public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.HeaderOrBuilder getHeaderOrBuilder(
int index) {
return header_.get(index);
}
-
+
private void initFields() {
requestIdMsb_ = 0L;
requestIdLsb_ = 0L;
@@ -761,7 +1169,7 @@ private void initFields() {
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
-
+
if (!hasRequestIdMsb()) {
memoizedIsInitialized = 0;
return false;
@@ -783,7 +1191,7 @@ public final boolean isInitialized() {
memoizedIsInitialized = 1;
return true;
}
-
+
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
@@ -810,12 +1218,12 @@ public void writeTo(com.google.protobuf.CodedOutputStream output)
}
getUnknownFields().writeTo(output);
}
-
+
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
-
+
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
@@ -849,94 +1257,83 @@ public int getSerializedSize() {
memoizedSerializedSize = size;
return size;
}
-
+
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
-
+
public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data).buildParsed();
+ return PARSER.parseFrom(data);
}
public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(data, extensionRegistry);
}
public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data).buildParsed();
+ return PARSER.parseFrom(data);
}
public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(data, extensionRegistry);
}
public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage parseFrom(java.io.InputStream input)
throws java.io.IOException {
- return newBuilder().mergeFrom(input).buildParsed();
+ return PARSER.parseFrom(input);
}
public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- return newBuilder().mergeFrom(input, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(input, extensionRegistry);
}
public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
- Builder builder = newBuilder();
- if (builder.mergeDelimitedFrom(input)) {
- return builder.buildParsed();
- } else {
- return null;
- }
+ return PARSER.parseDelimitedFrom(input);
}
public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- Builder builder = newBuilder();
- if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
- return builder.buildParsed();
- } else {
- return null;
- }
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
- return newBuilder().mergeFrom(input).buildParsed();
+ return PARSER.parseFrom(input);
}
public static com.linkedin.norbert.protos.NorbertProtos.NorbertMessage parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- return newBuilder().mergeFrom(input, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(input, extensionRegistry);
}
-
+
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(com.linkedin.norbert.protos.NorbertProtos.NorbertMessage prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
-
+
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
+ /**
+ * Protobuf type {@code norbert.NorbertMessage}
+ */
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements com.linkedin.norbert.protos.NorbertProtos.NorbertMessageOrBuilder {
@@ -944,18 +1341,21 @@ public static final class Builder extends
getDescriptor() {
return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_NorbertMessage_descriptor;
}
-
+
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_NorbertMessage_fieldAccessorTable;
+ return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_NorbertMessage_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.class, com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Builder.class);
}
-
+
// Construct using com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
-
- private Builder(BuilderParent parent) {
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
@@ -967,7 +1367,7 @@ private void maybeForceBuilderInitialization() {
private static Builder create() {
return new Builder();
}
-
+
public Builder clear() {
super.clear();
requestIdMsb_ = 0L;
@@ -990,20 +1390,20 @@ public Builder clear() {
}
return this;
}
-
+
public Builder clone() {
return create().mergeFrom(buildPartial());
}
-
+
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
- return com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.getDescriptor();
+ return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_NorbertMessage_descriptor;
}
-
+
public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage getDefaultInstanceForType() {
return com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.getDefaultInstance();
}
-
+
public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage build() {
com.linkedin.norbert.protos.NorbertProtos.NorbertMessage result = buildPartial();
if (!result.isInitialized()) {
@@ -1011,17 +1411,7 @@ public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage build() {
}
return result;
}
-
- private com.linkedin.norbert.protos.NorbertProtos.NorbertMessage buildParsed()
- throws com.google.protobuf.InvalidProtocolBufferException {
- com.linkedin.norbert.protos.NorbertProtos.NorbertMessage result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(
- result).asInvalidProtocolBufferException();
- }
- return result;
- }
-
+
public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage buildPartial() {
com.linkedin.norbert.protos.NorbertProtos.NorbertMessage result = new com.linkedin.norbert.protos.NorbertProtos.NorbertMessage(this);
int from_bitField0_ = bitField0_;
@@ -1063,7 +1453,7 @@ public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage buildPartial() {
onBuilt();
return result;
}
-
+
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof com.linkedin.norbert.protos.NorbertProtos.NorbertMessage) {
return mergeFrom((com.linkedin.norbert.protos.NorbertProtos.NorbertMessage)other);
@@ -1072,7 +1462,7 @@ public Builder mergeFrom(com.google.protobuf.Message other) {
return this;
}
}
-
+
public Builder mergeFrom(com.linkedin.norbert.protos.NorbertProtos.NorbertMessage other) {
if (other == com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.getDefaultInstance()) return this;
if (other.hasRequestIdMsb()) {
@@ -1085,13 +1475,17 @@ public Builder mergeFrom(com.linkedin.norbert.protos.NorbertProtos.NorbertMessag
setStatus(other.getStatus());
}
if (other.hasMessageName()) {
- setMessageName(other.getMessageName());
+ bitField0_ |= 0x00000008;
+ messageName_ = other.messageName_;
+ onChanged();
}
if (other.hasMessage()) {
setMessage(other.getMessage());
}
if (other.hasErrorMessage()) {
- setErrorMessage(other.getErrorMessage());
+ bitField0_ |= 0x00000020;
+ errorMessage_ = other.errorMessage_;
+ onChanged();
}
if (headerBuilder_ == null) {
if (!other.header_.isEmpty()) {
@@ -1102,170 +1496,151 @@ public Builder mergeFrom(com.linkedin.norbert.protos.NorbertProtos.NorbertMessag
ensureHeaderIsMutable();
header_.addAll(other.header_);
}
- onChanged();
- }
- } else {
- if (!other.header_.isEmpty()) {
- if (headerBuilder_.isEmpty()) {
- headerBuilder_.dispose();
- headerBuilder_ = null;
- header_ = other.header_;
- bitField0_ = (bitField0_ & ~0x00000040);
- headerBuilder_ =
- com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
- getHeaderFieldBuilder() : null;
- } else {
- headerBuilder_.addAllMessages(other.header_);
- }
- }
- }
- this.mergeUnknownFields(other.getUnknownFields());
- return this;
- }
-
- public final boolean isInitialized() {
- if (!hasRequestIdMsb()) {
-
- return false;
- }
- if (!hasRequestIdLsb()) {
-
- return false;
- }
- if (!hasMessageName()) {
-
- return false;
- }
- for (int i = 0; i < getHeaderCount(); i++) {
- if (!getHeader(i).isInitialized()) {
-
- return false;
- }
- }
- return true;
- }
-
- public Builder mergeFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder(
- this.getUnknownFields());
- while (true) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- this.setUnknownFields(unknownFields.build());
- onChanged();
- return this;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- this.setUnknownFields(unknownFields.build());
- onChanged();
- return this;
- }
- break;
- }
- case 9: {
- bitField0_ |= 0x00000001;
- requestIdMsb_ = input.readSFixed64();
- break;
- }
- case 17: {
- bitField0_ |= 0x00000002;
- requestIdLsb_ = input.readSFixed64();
- break;
- }
- case 80: {
- int rawValue = input.readEnum();
- com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Status value = com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Status.valueOf(rawValue);
- if (value == null) {
- unknownFields.mergeVarintField(10, rawValue);
- } else {
- bitField0_ |= 0x00000004;
- status_ = value;
- }
- break;
- }
- case 90: {
- bitField0_ |= 0x00000008;
- messageName_ = input.readBytes();
- break;
- }
- case 98: {
- bitField0_ |= 0x00000010;
- message_ = input.readBytes();
- break;
- }
- case 106: {
- bitField0_ |= 0x00000020;
- errorMessage_ = input.readBytes();
- break;
- }
- case 114: {
- com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.Builder subBuilder = com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.newBuilder();
- input.readMessage(subBuilder, extensionRegistry);
- addHeader(subBuilder.buildPartial());
- break;
- }
+ onChanged();
+ }
+ } else {
+ if (!other.header_.isEmpty()) {
+ if (headerBuilder_.isEmpty()) {
+ headerBuilder_.dispose();
+ headerBuilder_ = null;
+ header_ = other.header_;
+ bitField0_ = (bitField0_ & ~0x00000040);
+ headerBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getHeaderFieldBuilder() : null;
+ } else {
+ headerBuilder_.addAllMessages(other.header_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasRequestIdMsb()) {
+
+ return false;
+ }
+ if (!hasRequestIdLsb()) {
+
+ return false;
+ }
+ if (!hasMessageName()) {
+
+ return false;
+ }
+ for (int i = 0; i < getHeaderCount(); i++) {
+ if (!getHeader(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.linkedin.norbert.protos.NorbertProtos.NorbertMessage parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (com.linkedin.norbert.protos.NorbertProtos.NorbertMessage) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
}
}
+ return this;
}
-
private int bitField0_;
-
+
// required sfixed64 request_id_msb = 1;
private long requestIdMsb_ ;
+ /**
+ * required sfixed64 request_id_msb = 1;
+ */
public boolean hasRequestIdMsb() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
+ /**
+ * required sfixed64 request_id_msb = 1;
+ */
public long getRequestIdMsb() {
return requestIdMsb_;
}
+ /**
+ * required sfixed64 request_id_msb = 1;
+ */
public Builder setRequestIdMsb(long value) {
bitField0_ |= 0x00000001;
requestIdMsb_ = value;
onChanged();
return this;
}
+ /**
+ * required sfixed64 request_id_msb = 1;
+ */
public Builder clearRequestIdMsb() {
bitField0_ = (bitField0_ & ~0x00000001);
requestIdMsb_ = 0L;
onChanged();
return this;
}
-
+
// required sfixed64 request_id_lsb = 2;
private long requestIdLsb_ ;
+ /**
+ * required sfixed64 request_id_lsb = 2;
+ */
public boolean hasRequestIdLsb() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
+ /**
+ * required sfixed64 request_id_lsb = 2;
+ */
public long getRequestIdLsb() {
return requestIdLsb_;
}
+ /**
+ * required sfixed64 request_id_lsb = 2;
+ */
public Builder setRequestIdLsb(long value) {
bitField0_ |= 0x00000002;
requestIdLsb_ = value;
onChanged();
return this;
}
+ /**
+ * required sfixed64 request_id_lsb = 2;
+ */
public Builder clearRequestIdLsb() {
bitField0_ = (bitField0_ & ~0x00000002);
requestIdLsb_ = 0L;
onChanged();
return this;
}
-
+
// optional .norbert.NorbertMessage.Status status = 10 [default = OK];
private com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Status status_ = com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Status.OK;
+ /**
+ * optional .norbert.NorbertMessage.Status status = 10 [default = OK];
+ */
public boolean hasStatus() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
+ /**
+ * optional .norbert.NorbertMessage.Status status = 10 [default = OK];
+ */
public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Status getStatus() {
return status_;
}
+ /**
+ * optional .norbert.NorbertMessage.Status status = 10 [default = OK];
+ */
public Builder setStatus(com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Status value) {
if (value == null) {
throw new NullPointerException();
@@ -1275,29 +1650,59 @@ public Builder setStatus(com.linkedin.norbert.protos.NorbertProtos.NorbertMessag
onChanged();
return this;
}
+ /**
+ * optional .norbert.NorbertMessage.Status status = 10 [default = OK];
+ */
public Builder clearStatus() {
bitField0_ = (bitField0_ & ~0x00000004);
status_ = com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Status.OK;
onChanged();
return this;
}
-
+
// required string message_name = 11;
private java.lang.Object messageName_ = "";
+ /**
+ * required string message_name = 11;
+ */
public boolean hasMessageName() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
- public String getMessageName() {
+ /**
+ * required string message_name = 11;
+ */
+ public java.lang.String getMessageName() {
java.lang.Object ref = messageName_;
- if (!(ref instanceof String)) {
- String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
messageName_ = s;
return s;
} else {
- return (String) ref;
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string message_name = 11;
+ */
+ public com.google.protobuf.ByteString
+ getMessageNameBytes() {
+ java.lang.Object ref = messageName_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ messageName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
}
}
- public Builder setMessageName(String value) {
+ /**
+ * required string message_name = 11;
+ */
+ public Builder setMessageName(
+ java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
@@ -1306,26 +1711,46 @@ public Builder setMessageName(String value) {
onChanged();
return this;
}
+ /**
+ * required string message_name = 11;
+ */
public Builder clearMessageName() {
bitField0_ = (bitField0_ & ~0x00000008);
messageName_ = getDefaultInstance().getMessageName();
onChanged();
return this;
}
- void setMessageName(com.google.protobuf.ByteString value) {
- bitField0_ |= 0x00000008;
+ /**
+ * required string message_name = 11;
+ */
+ public Builder setMessageNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000008;
messageName_ = value;
onChanged();
+ return this;
}
-
+
// optional bytes message = 12;
private com.google.protobuf.ByteString message_ = com.google.protobuf.ByteString.EMPTY;
+ /**
+ * optional bytes message = 12;
+ */
public boolean hasMessage() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
+ /**
+ * optional bytes message = 12;
+ */
public com.google.protobuf.ByteString getMessage() {
return message_;
}
+ /**
+ * optional bytes message = 12;
+ */
public Builder setMessage(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
@@ -1335,29 +1760,59 @@ public Builder setMessage(com.google.protobuf.ByteString value) {
onChanged();
return this;
}
+ /**
+ * optional bytes message = 12;
+ */
public Builder clearMessage() {
bitField0_ = (bitField0_ & ~0x00000010);
message_ = getDefaultInstance().getMessage();
onChanged();
return this;
}
-
+
// optional string error_message = 13;
private java.lang.Object errorMessage_ = "";
+ /**
+ * optional string error_message = 13;
+ */
public boolean hasErrorMessage() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
- public String getErrorMessage() {
+ /**
+ * optional string error_message = 13;
+ */
+ public java.lang.String getErrorMessage() {
java.lang.Object ref = errorMessage_;
- if (!(ref instanceof String)) {
- String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
errorMessage_ = s;
return s;
} else {
- return (String) ref;
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string error_message = 13;
+ */
+ public com.google.protobuf.ByteString
+ getErrorMessageBytes() {
+ java.lang.Object ref = errorMessage_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ errorMessage_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
}
}
- public Builder setErrorMessage(String value) {
+ /**
+ * optional string error_message = 13;
+ */
+ public Builder setErrorMessage(
+ java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
@@ -1366,18 +1821,29 @@ public Builder setErrorMessage(String value) {
onChanged();
return this;
}
+ /**
+ * optional string error_message = 13;
+ */
public Builder clearErrorMessage() {
bitField0_ = (bitField0_ & ~0x00000020);
errorMessage_ = getDefaultInstance().getErrorMessage();
onChanged();
return this;
}
- void setErrorMessage(com.google.protobuf.ByteString value) {
- bitField0_ |= 0x00000020;
+ /**
+ * optional string error_message = 13;
+ */
+ public Builder setErrorMessageBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000020;
errorMessage_ = value;
onChanged();
+ return this;
}
-
+
// repeated .norbert.NorbertMessage.Header header = 14;
private java.util.List header_ =
java.util.Collections.emptyList();
@@ -1387,10 +1853,13 @@ private void ensureHeaderIsMutable() {
bitField0_ |= 0x00000040;
}
}
-
+
private com.google.protobuf.RepeatedFieldBuilder<
com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header, com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.Builder, com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.HeaderOrBuilder> headerBuilder_;
-
+
+ /**
+ * repeated .norbert.NorbertMessage.Header header = 14;
+ */
public java.util.List getHeaderList() {
if (headerBuilder_ == null) {
return java.util.Collections.unmodifiableList(header_);
@@ -1398,6 +1867,9 @@ public java.util.Listrepeated .norbert.NorbertMessage.Header header = 14;
+ */
public int getHeaderCount() {
if (headerBuilder_ == null) {
return header_.size();
@@ -1405,6 +1877,9 @@ public int getHeaderCount() {
return headerBuilder_.getCount();
}
}
+ /**
+ * repeated .norbert.NorbertMessage.Header header = 14;
+ */
public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header getHeader(int index) {
if (headerBuilder_ == null) {
return header_.get(index);
@@ -1412,6 +1887,9 @@ public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header getHeader
return headerBuilder_.getMessage(index);
}
}
+ /**
+ * repeated .norbert.NorbertMessage.Header header = 14;
+ */
public Builder setHeader(
int index, com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header value) {
if (headerBuilder_ == null) {
@@ -1426,6 +1904,9 @@ public Builder setHeader(
}
return this;
}
+ /**
+ * repeated .norbert.NorbertMessage.Header header = 14;
+ */
public Builder setHeader(
int index, com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.Builder builderForValue) {
if (headerBuilder_ == null) {
@@ -1437,6 +1918,9 @@ public Builder setHeader(
}
return this;
}
+ /**
+ * repeated .norbert.NorbertMessage.Header header = 14;
+ */
public Builder addHeader(com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header value) {
if (headerBuilder_ == null) {
if (value == null) {
@@ -1450,6 +1934,9 @@ public Builder addHeader(com.linkedin.norbert.protos.NorbertProtos.NorbertMessag
}
return this;
}
+ /**
+ * repeated .norbert.NorbertMessage.Header header = 14;
+ */
public Builder addHeader(
int index, com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header value) {
if (headerBuilder_ == null) {
@@ -1464,6 +1951,9 @@ public Builder addHeader(
}
return this;
}
+ /**
+ * repeated .norbert.NorbertMessage.Header header = 14;
+ */
public Builder addHeader(
com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.Builder builderForValue) {
if (headerBuilder_ == null) {
@@ -1475,6 +1965,9 @@ public Builder addHeader(
}
return this;
}
+ /**
+ * repeated .norbert.NorbertMessage.Header header = 14;
+ */
public Builder addHeader(
int index, com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.Builder builderForValue) {
if (headerBuilder_ == null) {
@@ -1486,6 +1979,9 @@ public Builder addHeader(
}
return this;
}
+ /**
+ * repeated .norbert.NorbertMessage.Header header = 14;
+ */
public Builder addAllHeader(
java.lang.Iterable extends com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header> values) {
if (headerBuilder_ == null) {
@@ -1497,6 +1993,9 @@ public Builder addAllHeader(
}
return this;
}
+ /**
+ * repeated .norbert.NorbertMessage.Header header = 14;
+ */
public Builder clearHeader() {
if (headerBuilder_ == null) {
header_ = java.util.Collections.emptyList();
@@ -1507,6 +2006,9 @@ public Builder clearHeader() {
}
return this;
}
+ /**
+ * repeated .norbert.NorbertMessage.Header header = 14;
+ */
public Builder removeHeader(int index) {
if (headerBuilder_ == null) {
ensureHeaderIsMutable();
@@ -1517,10 +2019,16 @@ public Builder removeHeader(int index) {
}
return this;
}
+ /**
+ * repeated .norbert.NorbertMessage.Header header = 14;
+ */
public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.Builder getHeaderBuilder(
int index) {
return getHeaderFieldBuilder().getBuilder(index);
}
+ /**
+ * repeated .norbert.NorbertMessage.Header header = 14;
+ */
public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.HeaderOrBuilder getHeaderOrBuilder(
int index) {
if (headerBuilder_ == null) {
@@ -1528,6 +2036,9 @@ public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.HeaderOrBuilder
return headerBuilder_.getMessageOrBuilder(index);
}
}
+ /**
+ * repeated .norbert.NorbertMessage.Header header = 14;
+ */
public java.util.List extends com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.HeaderOrBuilder>
getHeaderOrBuilderList() {
if (headerBuilder_ != null) {
@@ -1536,15 +2047,24 @@ public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.HeaderOrBuilder
return java.util.Collections.unmodifiableList(header_);
}
}
+ /**
+ * repeated .norbert.NorbertMessage.Header header = 14;
+ */
public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.Builder addHeaderBuilder() {
return getHeaderFieldBuilder().addBuilder(
com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.getDefaultInstance());
}
+ /**
+ * repeated .norbert.NorbertMessage.Header header = 14;
+ */
public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.Builder addHeaderBuilder(
int index) {
return getHeaderFieldBuilder().addBuilder(
index, com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.getDefaultInstance());
}
+ /**
+ * repeated .norbert.NorbertMessage.Header header = 14;
+ */
public java.util.List
getHeaderBuilderList() {
return getHeaderFieldBuilder().getBuilderList();
@@ -1563,144 +2083,309 @@ public com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.Builder a
}
return headerBuilder_;
}
-
+
// @@protoc_insertion_point(builder_scope:norbert.NorbertMessage)
}
-
+
static {
defaultInstance = new NorbertMessage(true);
defaultInstance.initFields();
}
-
+
// @@protoc_insertion_point(class_scope:norbert.NorbertMessage)
}
-
+
public interface NodeOrBuilder
extends com.google.protobuf.MessageOrBuilder {
-
+
// required int32 id = 1;
+ /**
+ * required int32 id = 1;
+ */
boolean hasId();
+ /**
+ * required int32 id = 1;
+ */
int getId();
-
+
// required string url = 2;
+ /**
+ * required string url = 2;
+ */
boolean hasUrl();
- String getUrl();
-
+ /**
+ * required string url = 2;
+ */
+ java.lang.String getUrl();
+ /**
+ * required string url = 2;
+ */
+ com.google.protobuf.ByteString
+ getUrlBytes();
+
// repeated int32 partition = 3;
+ /**
+ * repeated int32 partition = 3;
+ */
java.util.List getPartitionList();
+ /**
+ * repeated int32 partition = 3;
+ */
int getPartitionCount();
+ /**
+ * repeated int32 partition = 3;
+ */
int getPartition(int index);
-
+
// optional int64 persistentCapability = 4;
+ /**
+ * optional int64 persistentCapability = 4;
+ */
boolean hasPersistentCapability();
+ /**
+ * optional int64 persistentCapability = 4;
+ */
long getPersistentCapability();
}
+ /**
+ * Protobuf type {@code norbert.Node}
+ */
public static final class Node extends
com.google.protobuf.GeneratedMessage
implements NodeOrBuilder {
// Use Node.newBuilder() to construct.
- private Node(Builder builder) {
+ private Node(com.google.protobuf.GeneratedMessage.Builder> builder) {
super(builder);
+ this.unknownFields = builder.getUnknownFields();
}
- private Node(boolean noInit) {}
-
+ private Node(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
private static final Node defaultInstance;
public static Node getDefaultInstance() {
return defaultInstance;
}
-
+
public Node getDefaultInstanceForType() {
return defaultInstance;
}
-
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private Node(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ id_ = input.readInt32();
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ url_ = input.readBytes();
+ break;
+ }
+ case 24: {
+ if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ partition_ = new java.util.ArrayList();
+ mutable_bitField0_ |= 0x00000004;
+ }
+ partition_.add(input.readInt32());
+ break;
+ }
+ case 26: {
+ int length = input.readRawVarint32();
+ int limit = input.pushLimit(length);
+ if (!((mutable_bitField0_ & 0x00000004) == 0x00000004) && input.getBytesUntilLimit() > 0) {
+ partition_ = new java.util.ArrayList();
+ mutable_bitField0_ |= 0x00000004;
+ }
+ while (input.getBytesUntilLimit() > 0) {
+ partition_.add(input.readInt32());
+ }
+ input.popLimit(limit);
+ break;
+ }
+ case 32: {
+ bitField0_ |= 0x00000004;
+ persistentCapability_ = input.readInt64();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ partition_ = java.util.Collections.unmodifiableList(partition_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_Node_descriptor;
}
-
+
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_Node_fieldAccessorTable;
+ return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_Node_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.linkedin.norbert.protos.NorbertProtos.Node.class, com.linkedin.norbert.protos.NorbertProtos.Node.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser PARSER =
+ new com.google.protobuf.AbstractParser() {
+ public Node parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new Node(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser getParserForType() {
+ return PARSER;
}
-
+
private int bitField0_;
// required int32 id = 1;
public static final int ID_FIELD_NUMBER = 1;
private int id_;
+ /**
+ * required int32 id = 1;
+ */
public boolean hasId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
+ /**
+ * required int32 id = 1;
+ */
public int getId() {
return id_;
}
-
+
// required string url = 2;
public static final int URL_FIELD_NUMBER = 2;
private java.lang.Object url_;
+ /**
+ * required string url = 2;
+ */
public boolean hasUrl() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
- public String getUrl() {
+ /**
+ * required string url = 2;
+ */
+ public java.lang.String getUrl() {
java.lang.Object ref = url_;
- if (ref instanceof String) {
- return (String) ref;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
- String s = bs.toStringUtf8();
- if (com.google.protobuf.Internal.isValidUtf8(bs)) {
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
url_ = s;
}
return s;
}
}
- private com.google.protobuf.ByteString getUrlBytes() {
+ /**
+ * required string url = 2;
+ */
+ public com.google.protobuf.ByteString
+ getUrlBytes() {
java.lang.Object ref = url_;
- if (ref instanceof String) {
+ if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8((String) ref);
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
url_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
-
+
// repeated int32 partition = 3;
public static final int PARTITION_FIELD_NUMBER = 3;
private java.util.List partition_;
+ /**
+ * repeated int32 partition = 3;
+ */
public java.util.List
getPartitionList() {
return partition_;
}
+ /**
+ * repeated int32 partition = 3;
+ */
public int getPartitionCount() {
return partition_.size();
}
+ /**
+ * repeated int32 partition = 3;
+ */
public int getPartition(int index) {
return partition_.get(index);
}
-
+
// optional int64 persistentCapability = 4;
public static final int PERSISTENTCAPABILITY_FIELD_NUMBER = 4;
private long persistentCapability_;
+ /**
+ * optional int64 persistentCapability = 4;
+ */
public boolean hasPersistentCapability() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
+ /**
+ * optional int64 persistentCapability = 4;
+ */
public long getPersistentCapability() {
return persistentCapability_;
}
-
+
private void initFields() {
id_ = 0;
url_ = "";
- partition_ = java.util.Collections.emptyList();;
+ partition_ = java.util.Collections.emptyList();
persistentCapability_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
-
+
if (!hasId()) {
memoizedIsInitialized = 0;
return false;
@@ -1712,7 +2397,7 @@ public final boolean isInitialized() {
memoizedIsInitialized = 1;
return true;
}
-
+
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
@@ -1730,12 +2415,12 @@ public void writeTo(com.google.protobuf.CodedOutputStream output)
}
getUnknownFields().writeTo(output);
}
-
+
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
-
+
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
@@ -1762,94 +2447,83 @@ public int getSerializedSize() {
memoizedSerializedSize = size;
return size;
}
-
+
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
-
+
public static com.linkedin.norbert.protos.NorbertProtos.Node parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data).buildParsed();
+ return PARSER.parseFrom(data);
}
public static com.linkedin.norbert.protos.NorbertProtos.Node parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(data, extensionRegistry);
}
public static com.linkedin.norbert.protos.NorbertProtos.Node parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data).buildParsed();
+ return PARSER.parseFrom(data);
}
public static com.linkedin.norbert.protos.NorbertProtos.Node parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(data, extensionRegistry);
}
public static com.linkedin.norbert.protos.NorbertProtos.Node parseFrom(java.io.InputStream input)
throws java.io.IOException {
- return newBuilder().mergeFrom(input).buildParsed();
+ return PARSER.parseFrom(input);
}
public static com.linkedin.norbert.protos.NorbertProtos.Node parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- return newBuilder().mergeFrom(input, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(input, extensionRegistry);
}
public static com.linkedin.norbert.protos.NorbertProtos.Node parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
- Builder builder = newBuilder();
- if (builder.mergeDelimitedFrom(input)) {
- return builder.buildParsed();
- } else {
- return null;
- }
+ return PARSER.parseDelimitedFrom(input);
}
public static com.linkedin.norbert.protos.NorbertProtos.Node parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- Builder builder = newBuilder();
- if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
- return builder.buildParsed();
- } else {
- return null;
- }
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static com.linkedin.norbert.protos.NorbertProtos.Node parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
- return newBuilder().mergeFrom(input).buildParsed();
+ return PARSER.parseFrom(input);
}
public static com.linkedin.norbert.protos.NorbertProtos.Node parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- return newBuilder().mergeFrom(input, extensionRegistry)
- .buildParsed();
+ return PARSER.parseFrom(input, extensionRegistry);
}
-
+
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(com.linkedin.norbert.protos.NorbertProtos.Node prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
-
+
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
+ /**
+ * Protobuf type {@code norbert.Node}
+ */
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements com.linkedin.norbert.protos.NorbertProtos.NodeOrBuilder {
@@ -1857,18 +2531,21 @@ public static final class Builder extends
getDescriptor() {
return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_Node_descriptor;
}
-
+
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_Node_fieldAccessorTable;
+ return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_Node_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.linkedin.norbert.protos.NorbertProtos.Node.class, com.linkedin.norbert.protos.NorbertProtos.Node.Builder.class);
}
-
+
// Construct using com.linkedin.norbert.protos.NorbertProtos.Node.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
-
- private Builder(BuilderParent parent) {
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
@@ -1879,33 +2556,33 @@ private void maybeForceBuilderInitialization() {
private static Builder create() {
return new Builder();
}
-
+
public Builder clear() {
super.clear();
id_ = 0;
bitField0_ = (bitField0_ & ~0x00000001);
url_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
- partition_ = java.util.Collections.emptyList();;
+ partition_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
persistentCapability_ = 0L;
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
-
+
public Builder clone() {
return create().mergeFrom(buildPartial());
}
-
+
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
- return com.linkedin.norbert.protos.NorbertProtos.Node.getDescriptor();
+ return com.linkedin.norbert.protos.NorbertProtos.internal_static_norbert_Node_descriptor;
}
-
+
public com.linkedin.norbert.protos.NorbertProtos.Node getDefaultInstanceForType() {
return com.linkedin.norbert.protos.NorbertProtos.Node.getDefaultInstance();
}
-
+
public com.linkedin.norbert.protos.NorbertProtos.Node build() {
com.linkedin.norbert.protos.NorbertProtos.Node result = buildPartial();
if (!result.isInitialized()) {
@@ -1913,17 +2590,7 @@ public com.linkedin.norbert.protos.NorbertProtos.Node build() {
}
return result;
}
-
- private com.linkedin.norbert.protos.NorbertProtos.Node buildParsed()
- throws com.google.protobuf.InvalidProtocolBufferException {
- com.linkedin.norbert.protos.NorbertProtos.Node result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(
- result).asInvalidProtocolBufferException();
- }
- return result;
- }
-
+
public com.linkedin.norbert.protos.NorbertProtos.Node buildPartial() {
com.linkedin.norbert.protos.NorbertProtos.Node result = new com.linkedin.norbert.protos.NorbertProtos.Node(this);
int from_bitField0_ = bitField0_;
@@ -1949,7 +2616,7 @@ public com.linkedin.norbert.protos.NorbertProtos.Node buildPartial() {
onBuilt();
return result;
}
-
+
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof com.linkedin.norbert.protos.NorbertProtos.Node) {
return mergeFrom((com.linkedin.norbert.protos.NorbertProtos.Node)other);
@@ -1958,14 +2625,16 @@ public Builder mergeFrom(com.google.protobuf.Message other) {
return this;
}
}
-
+
public Builder mergeFrom(com.linkedin.norbert.protos.NorbertProtos.Node other) {
if (other == com.linkedin.norbert.protos.NorbertProtos.Node.getDefaultInstance()) return this;
if (other.hasId()) {
setId(other.getId());
}
if (other.hasUrl()) {
- setUrl(other.getUrl());
+ bitField0_ |= 0x00000002;
+ url_ = other.url_;
+ onChanged();
}
if (!other.partition_.isEmpty()) {
if (partition_.isEmpty()) {
@@ -1983,7 +2652,7 @@ public Builder mergeFrom(com.linkedin.norbert.protos.NorbertProtos.Node other) {
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
-
+
public final boolean isInitialized() {
if (!hasId()) {
@@ -1995,102 +2664,102 @@ public final boolean isInitialized() {
}
return true;
}
-
+
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder(
- this.getUnknownFields());
- while (true) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- this.setUnknownFields(unknownFields.build());
- onChanged();
- return this;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- this.setUnknownFields(unknownFields.build());
- onChanged();
- return this;
- }
- break;
- }
- case 8: {
- bitField0_ |= 0x00000001;
- id_ = input.readInt32();
- break;
- }
- case 18: {
- bitField0_ |= 0x00000002;
- url_ = input.readBytes();
- break;
- }
- case 24: {
- ensurePartitionIsMutable();
- partition_.add(input.readInt32());
- break;
- }
- case 26: {
- int length = input.readRawVarint32();
- int limit = input.pushLimit(length);
- while (input.getBytesUntilLimit() > 0) {
- addPartition(input.readInt32());
- }
- input.popLimit(limit);
- break;
- }
- case 32: {
- bitField0_ |= 0x00000008;
- persistentCapability_ = input.readInt64();
- break;
- }
+ com.linkedin.norbert.protos.NorbertProtos.Node parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (com.linkedin.norbert.protos.NorbertProtos.Node) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
}
}
+ return this;
}
-
private int bitField0_;
-
+
// required int32 id = 1;
private int id_ ;
+ /**
+ * required int32 id = 1;
+ */
public boolean hasId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
+ /**
+ * required int32 id = 1;
+ */
public int getId() {
return id_;
}
+ /**
+ * required int32 id = 1;
+ */
public Builder setId(int value) {
bitField0_ |= 0x00000001;
id_ = value;
onChanged();
return this;
}
+ /**
+ * required int32 id = 1;
+ */
public Builder clearId() {
bitField0_ = (bitField0_ & ~0x00000001);
id_ = 0;
onChanged();
return this;
}
-
+
// required string url = 2;
private java.lang.Object url_ = "";
+ /**
+ * required string url = 2;
+ */
public boolean hasUrl() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
- public String getUrl() {
+ /**
+ * required string url = 2;
+ */
+ public java.lang.String getUrl() {
java.lang.Object ref = url_;
- if (!(ref instanceof String)) {
- String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
url_ = s;
return s;
} else {
- return (String) ref;
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string url = 2;
+ */
+ public com.google.protobuf.ByteString
+ getUrlBytes() {
+ java.lang.Object ref = url_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ url_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
}
}
- public Builder setUrl(String value) {
+ /**
+ * required string url = 2;
+ */
+ public Builder setUrl(
+ java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
@@ -2099,36 +2768,59 @@ public Builder setUrl(String value) {
onChanged();
return this;
}
+ /**
+ * required string url = 2;
+ */
public Builder clearUrl() {
bitField0_ = (bitField0_ & ~0x00000002);
url_ = getDefaultInstance().getUrl();
onChanged();
return this;
}
- void setUrl(com.google.protobuf.ByteString value) {
- bitField0_ |= 0x00000002;
+ /**
+ * required string url = 2;
+ */
+ public Builder setUrlBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
url_ = value;
onChanged();
+ return this;
}
-
+
// repeated int32 partition = 3;
- private java.util.List partition_ = java.util.Collections.emptyList();;
+ private java.util.List partition_ = java.util.Collections.emptyList();
private void ensurePartitionIsMutable() {
if (!((bitField0_ & 0x00000004) == 0x00000004)) {
partition_ = new java.util.ArrayList(partition_);
bitField0_ |= 0x00000004;
}
}
+ /**
+ * repeated int32 partition = 3;
+ */
public java.util.List
getPartitionList() {
return java.util.Collections.unmodifiableList(partition_);
}
+ /**
+ * repeated int32 partition = 3;
+ */
public int getPartitionCount() {
return partition_.size();
}
+ /**
+ * repeated int32 partition = 3;
+ */
public int getPartition(int index) {
return partition_.get(index);
}
+ /**
+ * repeated int32 partition = 3;
+ */
public Builder setPartition(
int index, int value) {
ensurePartitionIsMutable();
@@ -2136,12 +2828,18 @@ public Builder setPartition(
onChanged();
return this;
}
+ /**
+ * repeated int32 partition = 3;
+ */
public Builder addPartition(int value) {
ensurePartitionIsMutable();
partition_.add(value);
onChanged();
return this;
}
+ /**
+ * repeated int32 partition = 3;
+ */
public Builder addAllPartition(
java.lang.Iterable extends java.lang.Integer> values) {
ensurePartitionIsMutable();
@@ -2149,45 +2847,60 @@ public Builder addAllPartition(
onChanged();
return this;
}
+ /**
+ * repeated int32 partition = 3;
+ */
public Builder clearPartition() {
- partition_ = java.util.Collections.emptyList();;
+ partition_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
onChanged();
return this;
}
-
+
// optional int64 persistentCapability = 4;
private long persistentCapability_ ;
+ /**
+ * optional int64 persistentCapability = 4;
+ */
public boolean hasPersistentCapability() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
+ /**
+ * optional int64 persistentCapability = 4;
+ */
public long getPersistentCapability() {
return persistentCapability_;
}
+ /**
+ * optional int64 persistentCapability = 4;
+ */
public Builder setPersistentCapability(long value) {
bitField0_ |= 0x00000008;
persistentCapability_ = value;
onChanged();
return this;
}
+ /**
+ * optional int64 persistentCapability = 4;
+ */
public Builder clearPersistentCapability() {
bitField0_ = (bitField0_ & ~0x00000008);
persistentCapability_ = 0L;
onChanged();
return this;
}
-
+
// @@protoc_insertion_point(builder_scope:norbert.Node)
}
-
+
static {
defaultInstance = new Node(true);
defaultInstance.initFields();
}
-
+
// @@protoc_insertion_point(class_scope:norbert.Node)
}
-
+
private static com.google.protobuf.Descriptors.Descriptor
internal_static_norbert_NorbertMessage_descriptor;
private static
@@ -2203,7 +2916,7 @@ public Builder clearPersistentCapability() {
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_norbert_Node_fieldAccessorTable;
-
+
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
return descriptor;
@@ -2212,18 +2925,18 @@ public Builder clearPersistentCapability() {
descriptor;
static {
java.lang.String[] descriptorData = {
- "\n\rnorbert.proto\022\007norbert\"\264\002\n\016NorbertMess" +
- "age\022\026\n\016request_id_msb\030\001 \002(\020\022\026\n\016request_i" +
- "d_lsb\030\002 \002(\020\0222\n\006status\030\n \001(\0162\036.norbert.No" +
- "rbertMessage.Status:\002OK\022\024\n\014message_name\030" +
- "\013 \002(\t\022\017\n\007message\030\014 \001(\014\022\025\n\rerror_message\030" +
- "\r \001(\t\022.\n\006header\030\016 \003(\0132\036.norbert.NorbertM" +
- "essage.Header\032$\n\006Header\022\013\n\003key\030\001 \002(\t\022\r\n\005" +
- "value\030\002 \001(\t\"*\n\006Status\022\006\n\002OK\020\000\022\t\n\005ERROR\020\001" +
- "\022\r\n\tHEAVYLOAD\020\002\"P\n\004Node\022\n\n\002id\030\001 \002(\005\022\013\n\003u" +
- "rl\030\002 \002(\t\022\021\n\tpartition\030\003 \003(\005\022\034\n\024persisten",
- "tCapability\030\004 \001(\003B.\n\033com.linkedin.norber" +
- "t.protosB\rNorbertProtosH\001"
+ "\n\026protobuf/norbert.proto\022\007norbert\"\264\002\n\016No" +
+ "rbertMessage\022\026\n\016request_id_msb\030\001 \002(\020\022\026\n\016" +
+ "request_id_lsb\030\002 \002(\020\0222\n\006status\030\n \001(\0162\036.n" +
+ "orbert.NorbertMessage.Status:\002OK\022\024\n\014mess" +
+ "age_name\030\013 \002(\t\022\017\n\007message\030\014 \001(\014\022\025\n\rerror" +
+ "_message\030\r \001(\t\022.\n\006header\030\016 \003(\0132\036.norbert" +
+ ".NorbertMessage.Header\032$\n\006Header\022\013\n\003key\030" +
+ "\001 \002(\t\022\r\n\005value\030\002 \001(\t\"*\n\006Status\022\006\n\002OK\020\000\022\t" +
+ "\n\005ERROR\020\001\022\r\n\tHEAVYLOAD\020\002\"P\n\004Node\022\n\n\002id\030\001" +
+ " \002(\005\022\013\n\003url\030\002 \002(\t\022\021\n\tpartition\030\003 \003(\005\022\034\n\024",
+ "persistentCapability\030\004 \001(\003B.\n\033com.linked" +
+ "in.norbert.protosB\rNorbertProtosH\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -2235,25 +2948,19 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
internal_static_norbert_NorbertMessage_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_norbert_NorbertMessage_descriptor,
- new java.lang.String[] { "RequestIdMsb", "RequestIdLsb", "Status", "MessageName", "Message", "ErrorMessage", "Header", },
- com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.class,
- com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Builder.class);
+ new java.lang.String[] { "RequestIdMsb", "RequestIdLsb", "Status", "MessageName", "Message", "ErrorMessage", "Header", });
internal_static_norbert_NorbertMessage_Header_descriptor =
internal_static_norbert_NorbertMessage_descriptor.getNestedTypes().get(0);
internal_static_norbert_NorbertMessage_Header_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_norbert_NorbertMessage_Header_descriptor,
- new java.lang.String[] { "Key", "Value", },
- com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.class,
- com.linkedin.norbert.protos.NorbertProtos.NorbertMessage.Header.Builder.class);
+ new java.lang.String[] { "Key", "Value", });
internal_static_norbert_Node_descriptor =
getDescriptor().getMessageTypes().get(1);
internal_static_norbert_Node_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_norbert_Node_descriptor,
- new java.lang.String[] { "Id", "Url", "Partition", "PersistentCapability", },
- com.linkedin.norbert.protos.NorbertProtos.Node.class,
- com.linkedin.norbert.protos.NorbertProtos.Node.Builder.class);
+ new java.lang.String[] { "Id", "Url", "Partition", "PersistentCapability", });
return null;
}
};
@@ -2262,6 +2969,6 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
new com.google.protobuf.Descriptors.FileDescriptor[] {
}, assigner);
}
-
+
// @@protoc_insertion_point(outer_class_scope)
}
diff --git a/gradle.properties b/gradle.properties
index 0d2fc7dd..96f073e3 100644
--- a/gradle.properties
+++ b/gradle.properties
@@ -1,4 +1,4 @@
-version=0.6.85
-defaultScalaVersion=2.10.3
-targetScalaVersions=2.10.3
+version=0.6.85-tm-pb26-2
+defaultScalaVersion=2.10.4
+targetScalaVersions=2.10.4
crossBuild=false
diff --git a/java-cluster/build.gradle b/java-cluster/build.gradle
index dd59778b..28c4c1cf 100644
--- a/java-cluster/build.gradle
+++ b/java-cluster/build.gradle
@@ -4,5 +4,6 @@ apply plugin: 'scala'
dependencies {
compile project(":cluster$scalaSuffix")
compile externalDependency.scalaLibrary
+ compile externalDependency.scalaActors
}
diff --git a/java-network/build.gradle b/java-network/build.gradle
index 8d17490f..0946aa32 100644
--- a/java-network/build.gradle
+++ b/java-network/build.gradle
@@ -6,5 +6,6 @@ dependencies {
compile project(":java-cluster$scalaSuffix")
compile externalDependency.scalaLibrary
+ compile externalDependency.scalaActors
}
diff --git a/java-network/src/main/java/com/linkedin/norbert/javacompat/network/ConsistentHashPartitionedLoadBalancer.java b/java-network/src/main/java/com/linkedin/norbert/javacompat/network/ConsistentHashPartitionedLoadBalancer.java
index 939f84ed..70f1338a 100644
--- a/java-network/src/main/java/com/linkedin/norbert/javacompat/network/ConsistentHashPartitionedLoadBalancer.java
+++ b/java-network/src/main/java/com/linkedin/norbert/javacompat/network/ConsistentHashPartitionedLoadBalancer.java
@@ -1,24 +1,19 @@
package com.linkedin.norbert.javacompat.network;
import com.linkedin.norbert.javacompat.cluster.Node;
-import java.util.AbstractMap;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.NavigableMap;
-import java.util.Set;
-import java.util.TreeMap;
+
+import java.util.*;
public class ConsistentHashPartitionedLoadBalancer implements PartitionedLoadBalancer
{
private final HashFunction _hashFunction;
- private final Map> _rings;
+ private final NavigableMap> _rings;
private final TreeMap>> _routingMap;
public ConsistentHashPartitionedLoadBalancer(HashFunction hashFunction,
- Map> rings,
+ NavigableMap> rings,
TreeMap>> routingMap,
PartitionedLoadBalancer fallThrough) {
this._hashFunction = hashFunction;
@@ -50,7 +45,7 @@ public static ConsistentHashPartitionedLoadBalancer> rings = new TreeMap>();
+ NavigableMap> rings = new TreeMap>();
for (Map.Entry> entry : partitionNodes.entrySet())
{
Integer partId = entry.getKey();
@@ -120,8 +115,9 @@ public Node nextNode(PartitionedId partitionedId)
public Node nextNode(PartitionedId partitionedId, Long capability, Long persistentCapability)
{
long hash = _hashFunction.hash(partitionedId.toString());
- long partitionId = (int)(Math.abs(hash) % _rings.size());
- NavigableMap ring = _rings.get(partitionId);
+ int partitionId = (int)(Math.abs(hash) % _rings.size());
+ Map.Entry> ringEntry = lookup(_rings, partitionId);
+ NavigableMap ring = ringEntry.getValue();
Endpoint endpoint = searchWheel(ring, hash, new Function() {
@Override
public Boolean apply(Endpoint key) {
@@ -131,7 +127,22 @@ public Boolean apply(Endpoint key) {
return endpoint.getNode();
}
- @Override
+ @Override
+ public LinkedHashSet nextNodes(PartitionedId partitionedId) {
+ return null;
+ }
+
+ @Override
+ public LinkedHashSet nextNodes(PartitionedId partitionedId, Long capability) {
+ return null;
+ }
+
+ @Override
+ public LinkedHashSet nextNodes(PartitionedId partitionedId, Long capability, Long persistentCapability) {
+ return null;
+ }
+
+ @Override
public Set nodesForPartitionedId(PartitionedId partitionedId) {
return nodesForPartitionedId(partitionedId, 0L, 0L);
}
diff --git a/java-network/src/main/java/com/linkedin/norbert/javacompat/network/PartitionedLoadBalancer.java b/java-network/src/main/java/com/linkedin/norbert/javacompat/network/PartitionedLoadBalancer.java
index d3fd718a..92a10cce 100644
--- a/java-network/src/main/java/com/linkedin/norbert/javacompat/network/PartitionedLoadBalancer.java
+++ b/java-network/src/main/java/com/linkedin/norbert/javacompat/network/PartitionedLoadBalancer.java
@@ -15,10 +15,11 @@
*/
package com.linkedin.norbert.javacompat.network;
+import com.linkedin.norbert.javacompat.cluster.Node;
+
+import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;
-import java.lang.Long;
-import com.linkedin.norbert.javacompat.cluster.Node;
/**
* A PartitionedLoadBalancer handles calculating the next Node a message should be routed to
@@ -59,7 +60,39 @@ public interface PartitionedLoadBalancer {
*/
Node nextNode(PartitionedId id, Long capability, Long persistentCapability);
- /**
+
+ /**
+ * Returns the next Node a message should be routed to based on the PartitionId provided.
+ *
+ * @param id the id to be used to calculate partitioning information.
+ *
+ * @return the Node to route the next message to
+ */
+ LinkedHashSet nextNodes(PartitionedId id);
+
+ /**
+ * Returns the next Node a message should be routed to based on the PartitionId provided.
+ *
+ * @param id the id to be used to calculate partitioning information.
+ * @param capability the minimal capability required by client
+ *
+ * @return the Node to route the next message to
+ */
+ LinkedHashSet nextNodes(PartitionedId id, Long capability);
+
+ /**
+ * Returns the next Node a message should be routed to based on the PartitionId provided.
+ *
+ * @param id the id to be used to calculate partitioning information.
+ * @param capability the minimal capability required by client
+ * @param persistentCapability the capability of more persistent nature
+ *
+ * @return the Node to route the next message to
+ */
+ LinkedHashSet nextNodes(PartitionedId id, Long capability, Long persistentCapability);
+
+
+ /**
* Returns all replica nodes for the same partitionedId
* @return the Nodes to multicast the next messages to each replica
*/
diff --git a/java-network/src/main/java/com/linkedin/norbert/javacompat/network/RingHashPartitionedLoadBalancer.java b/java-network/src/main/java/com/linkedin/norbert/javacompat/network/RingHashPartitionedLoadBalancer.java
index f1a74cd4..1fc8d76b 100644
--- a/java-network/src/main/java/com/linkedin/norbert/javacompat/network/RingHashPartitionedLoadBalancer.java
+++ b/java-network/src/main/java/com/linkedin/norbert/javacompat/network/RingHashPartitionedLoadBalancer.java
@@ -18,6 +18,7 @@
import com.linkedin.norbert.javacompat.cluster.Node;
import org.apache.log4j.Logger;
+import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
@@ -83,8 +84,23 @@ public Node nextNode(Integer partitionedId, Long capability, Long persistentCapa
log.warn("All endpoints seem unavailable! Using the default");
return firstEndpoint.getNode();
}
-
- @Override
+
+ @Override
+ public LinkedHashSet nextNodes(Integer integer) {
+ return null;
+ }
+
+ @Override
+ public LinkedHashSet nextNodes(Integer integer, Long capability) {
+ return null;
+ }
+
+ @Override
+ public LinkedHashSet nextNodes(Integer integer, Long capability, Long persistentCapability) {
+ return null;
+ }
+
+ @Override
public Set nodesForPartitionedId(Integer partitionedId) {
throw new UnsupportedOperationException();
}
diff --git a/java-network/src/main/scala/com/linkedin/norbert/javacompat/network/JavaLbfToScalaLbf.scala b/java-network/src/main/scala/com/linkedin/norbert/javacompat/network/JavaLbfToScalaLbf.scala
index 588a6839..98d9256b 100644
--- a/java-network/src/main/scala/com/linkedin/norbert/javacompat/network/JavaLbfToScalaLbf.scala
+++ b/java-network/src/main/scala/com/linkedin/norbert/javacompat/network/JavaLbfToScalaLbf.scala
@@ -2,13 +2,13 @@ package com.linkedin.norbert
package javacompat
package network
-import com.linkedin.norbert.network.partitioned.loadbalancer.{PartitionedLoadBalancerFactory => SPartitionedLoadBalancerFactory, PartitionedLoadBalancer => SPartitionedLoadBalancer}
-import com.linkedin.norbert.network.client.loadbalancer.{LoadBalancerFactory => SLoadBalancerFactory, LoadBalancer => SLoadBalancer}
+import java.util
+import com.linkedin.norbert.EndpointConversions._
import com.linkedin.norbert.cluster.{Node => SNode}
+import com.linkedin.norbert.javacompat.cluster.Node
import com.linkedin.norbert.network.common.{Endpoint => SEndpoint}
-
-import com.linkedin.norbert.EndpointConversions._
+import com.linkedin.norbert.network.partitioned.loadbalancer.{PartitionedLoadBalancer => SPartitionedLoadBalancer, PartitionedLoadBalancerFactory => SPartitionedLoadBalancerFactory}
class JavaLbfToScalaLbf[PartitionedId](javaLbf: PartitionedLoadBalancerFactory[PartitionedId]) extends SPartitionedLoadBalancerFactory[PartitionedId] {
def newLoadBalancer(nodes: Set[SEndpoint]) = {
@@ -78,7 +78,25 @@ class JavaLbfToScalaLbf[PartitionedId](javaLbf: PartitionedLoadBalancerFactory[P
}
sMap
}
- }
+
+ def rewrap(nodes: util.LinkedHashSet[com.linkedin.norbert.javacompat.cluster.Node]): util.LinkedHashSet[SNode] = {
+ val result = new util.LinkedHashSet[SNode]()
+ val it = nodes.iterator()
+ while(it.hasNext) {
+ val node:Node = it.next()
+ result.add(new SNode(node.id, node.url, node.available, node.partitionIds, node.capability, node.persistentCapability))
+ }
+ result
+ }
+ override def nextNodes(id: PartitionedId, capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[SNode] = {
+ (capability, persistentCapability) match {
+ case (Some(c),Some(pc)) => rewrap(lb.nextNodes(id, c.longValue, pc.longValue))
+ case (None, Some(pc)) => rewrap(lb.nextNodes(id, 0L, pc.longValue))
+ case (Some(c), None) => rewrap(lb.nextNodes(id, c.longValue, 0L))
+ case (None, None) => rewrap(lb.nextNodes(id))
+ }
+ }
+ }
}
diff --git a/java-network/src/main/scala/com/linkedin/norbert/javacompat/network/ScalaLbfToJavaLbf.scala b/java-network/src/main/scala/com/linkedin/norbert/javacompat/network/ScalaLbfToJavaLbf.scala
index 6f821553..3d62221c 100644
--- a/java-network/src/main/scala/com/linkedin/norbert/javacompat/network/ScalaLbfToJavaLbf.scala
+++ b/java-network/src/main/scala/com/linkedin/norbert/javacompat/network/ScalaLbfToJavaLbf.scala
@@ -2,10 +2,12 @@ package com.linkedin.norbert
package javacompat
package network
-import com.linkedin.norbert.network.partitioned.loadbalancer.{PartitionedLoadBalancerFactory => SPartitionedLoadBalancerFactory}
+import java.{lang, util}
+
import com.linkedin.norbert.EndpointConversions._
-import javacompat.cluster.Node
-import javacompat._
+import com.linkedin.norbert.javacompat._
+import com.linkedin.norbert.javacompat.cluster.{JavaNode, Node => JNode}
+import com.linkedin.norbert.network.partitioned.loadbalancer.{PartitionedLoadBalancerFactory => SPartitionedLoadBalancerFactory}
class ScalaLbfToJavaLbf[PartitionedId](scalaLbf: SPartitionedLoadBalancerFactory[PartitionedId]) extends PartitionedLoadBalancerFactory[PartitionedId] {
@@ -22,7 +24,7 @@ class ScalaLbfToJavaLbf[PartitionedId](scalaLbf: SPartitionedLoadBalancerFactory
def nodesForOneReplica(id: PartitionedId, capability: java.lang.Long, persistentCapability: java.lang.Long) = {
val replica = scalaBalancer.nodesForOneReplica(id, capability, persistentCapability)
- val result = new java.util.HashMap[Node, java.util.Set[java.lang.Integer]](replica.size)
+ val result = new java.util.HashMap[JNode, java.util.Set[java.lang.Integer]](replica.size)
replica.foreach { case (node, partitions) =>
result.put(node, partitions)
@@ -48,7 +50,7 @@ class ScalaLbfToJavaLbf[PartitionedId](scalaLbf: SPartitionedLoadBalancerFactory
def nodesForPartitionedId(id: PartitionedId, capability: java.lang.Long, persistentCapability: java.lang.Long) = {
val set = scalaBalancer.nodesForPartitionedId(id, capability, persistentCapability)
- val jSet = new java.util.HashSet[Node]()
+ val jSet = new java.util.HashSet[JNode]()
set.foldLeft(jSet) { case (jSet, node) => {jSet.add(node); jSet} }
jSet
}
@@ -58,7 +60,7 @@ class ScalaLbfToJavaLbf[PartitionedId](scalaLbf: SPartitionedLoadBalancerFactory
def nodesForPartitions(id: PartitionedId, partitions: java.util.Set[java.lang.Integer], capability: java.lang.Long) = nodesForPartitions(id, partitions, capability, 0L)
def nodesForPartitions(id: PartitionedId, partitions:java.util.Set[java.lang.Integer], capability: java.lang.Long, persistentCapability: java.lang.Long) = {
val replica = scalaBalancer.nodesForPartitions(id, partitions, capability, persistentCapability)
- val result = new java.util.HashMap[Node, java.util.Set[java.lang.Integer]](replica.size)
+ val result = new java.util.HashMap[JNode, java.util.Set[java.lang.Integer]](replica.size)
replica.foreach { case (node, partitions) =>
result.put(node, partitions)
@@ -71,6 +73,26 @@ class ScalaLbfToJavaLbf[PartitionedId](scalaLbf: SPartitionedLoadBalancerFactory
if (capability.longValue == 0L) None
else Some(capability.longValue)
}
+
+ override def nextNodes(id: PartitionedId): util.LinkedHashSet[JNode] = nextNodes(id, 0L, 0L)
+
+ override def nextNodes(id: PartitionedId, capability: lang.Long): util.LinkedHashSet[JNode] = nextNodes(id, capability, 0L)
+
+ override def nextNodes(id: PartitionedId, capability: lang.Long, persistentCapability: lang.Long): util.LinkedHashSet[JNode] = {
+ rewrap(scalaBalancer.nextNodes(id, Option(capability), Option(persistentCapability)))
+ }
+
+ def rewrap(nodes: util.LinkedHashSet[com.linkedin.norbert.cluster.Node]): util.LinkedHashSet[JNode] = {
+ val result = new util.LinkedHashSet[JNode]()
+ val it = nodes.iterator()
+ while(it.hasNext) {
+ val node:cluster.Node = it.next()
+ result.add(new JavaNode(node.id, node.url, node.available, node.partitionIds, node.capability, node.persistentCapability))
+ }
+ result
+
+ }
+
}
diff --git a/java-network/src/test/java/com/linkedin/norbert/javacompat/network/ConsistentHashPartitionedLoadBalancerTest.java b/java-network/src/test/java/com/linkedin/norbert/javacompat/network/ConsistentHashPartitionedLoadBalancerTest.java
new file mode 100644
index 00000000..6f8882eb
--- /dev/null
+++ b/java-network/src/test/java/com/linkedin/norbert/javacompat/network/ConsistentHashPartitionedLoadBalancerTest.java
@@ -0,0 +1,127 @@
+package com.linkedin.norbert.javacompat.network;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import com.linkedin.norbert.javacompat.cluster.JavaNode;
+import com.linkedin.norbert.javacompat.cluster.Node;
+import org.junit.Assert;
+import org.junit.Test;
+import scala.Option;
+
+/**
+ * A unit test for the javacompat ConsistentHashPartitionedLoadBalancer.
+ */
+public class ConsistentHashPartitionedLoadBalancerTest {
+
+ private static class TestEndpoint implements Endpoint {
+
+ private final Node node;
+ private final boolean canServeRequests;
+
+ public TestEndpoint(Node node, boolean canServeRequests) {
+ this.node = node;
+ this.canServeRequests = canServeRequests;
+ }
+
+ @Override
+ public Node getNode() {
+ return node;
+ }
+
+ @Override
+ public boolean canServeRequests() {
+ return canServeRequests;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ TestEndpoint that = (TestEndpoint) o;
+
+ if (!node.equals(that.node)) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return node.hashCode();
+ }
+ }
+
+
+ @Test
+ public void testSingleNode() {
+
+ // simplest test case, make sure we can find the node to route to
+
+ Set testEndpoints = new HashSet();
+ Set partitionsNodeOne = new HashSet();
+ partitionsNodeOne.add(1);
+
+ Node nodeOne = new JavaNode(1, "localhost:9000", true, partitionsNodeOne, Option.empty(), Option.empty());
+ Endpoint endpointOne = new TestEndpoint(nodeOne, true);
+ testEndpoints.add(endpointOne);
+ ConsistentHashPartitionedLoadBalancer loadBalancer = ConsistentHashPartitionedLoadBalancer.build(
+ 1,
+ new HashFunction.MD5HashFunction(),
+ testEndpoints,
+ null
+ );
+ Set nodes = loadBalancer.nodesForPartitionedId(1);
+ Assert.assertNotNull(nodes);
+ Assert.assertEquals(1, nodes.size());
+ Node node = loadBalancer.nextNode(1);
+ Assert.assertNotNull(node);
+ }
+
+
+
+ @Test
+ public void testTwoNodes() {
+ // verify that both endpoints will get hit
+ Set testEndpoints = new HashSet();
+ Set partitionsNodeOne = new HashSet();
+ partitionsNodeOne.add(1);
+
+ Node nodeOne = new JavaNode(1, "localhost:9000", true, partitionsNodeOne, Option.empty(), Option.empty());
+ Endpoint endpointOne = new TestEndpoint(nodeOne, true);
+
+ Node nodeTwo = new JavaNode(2, "localhost:9001", true, partitionsNodeOne, Option.empty(), Option.empty());
+ Endpoint endpointTwo = new TestEndpoint(nodeTwo, true);
+
+ testEndpoints.add(endpointOne);
+ testEndpoints.add(endpointTwo);
+
+ ConsistentHashPartitionedLoadBalancer loadBalancer = ConsistentHashPartitionedLoadBalancer.build(
+ 1,
+ new HashFunction.MD5HashFunction(),
+ testEndpoints,
+ null
+ );
+
+ Set nodes = loadBalancer.nodesForPartitionedId(1);
+ Assert.assertNotNull(nodes);
+ Assert.assertEquals(1, nodes.size());
+
+ Node resultOne = loadBalancer.nextNode(1);
+ Node resultTwo = loadBalancer.nextNode(2);
+
+ Assert.assertNotNull(resultOne);
+ Assert.assertNotNull(resultTwo);
+
+ // this was done via trial and error, there is no shortcut here
+ Assert.assertEquals(nodeOne, resultOne);
+ Assert.assertEquals(nodeTwo, resultTwo);
+ }
+
+ @Test
+ public void testNonOverlapOfPartitions() {
+
+ }
+
+
+}
diff --git a/network/build.gradle b/network/build.gradle
index 709fa614..80fce452 100644
--- a/network/build.gradle
+++ b/network/build.gradle
@@ -4,6 +4,7 @@ apply plugin: 'scala'
dependencies {
compile project(":cluster$scalaSuffix")
compile externalDependency.scalaLibrary
+ compile externalDependency.scalaActors
compile externalDependency.netty
compile externalDependency.slf4jApi
compile externalDependency.slf4jLog4j
diff --git a/network/src/main/java/com/google/protobuf/ByteStringUtils.java b/network/src/main/java/com/google/protobuf/ByteStringUtils.java
new file mode 100644
index 00000000..77666a2a
--- /dev/null
+++ b/network/src/main/java/com/google/protobuf/ByteStringUtils.java
@@ -0,0 +1,14 @@
+package com.google.protobuf;
+
+/**
+ * A small utility to avoid byte copying of arrays by protocol buffers.
+ *
+ * Trades safety/security for performance.
+ *
+ */
+public class ByteStringUtils {
+
+ public static ByteString wrap(byte[] buffer) {
+ return new LiteralByteString(buffer);
+ }
+}
diff --git a/network/src/main/scala/com/linkedin/norbert/network/client/NetworkClient.scala b/network/src/main/scala/com/linkedin/norbert/network/client/NetworkClient.scala
index 1ddb54ff..9df967c1 100644
--- a/network/src/main/scala/com/linkedin/norbert/network/client/NetworkClient.scala
+++ b/network/src/main/scala/com/linkedin/norbert/network/client/NetworkClient.scala
@@ -18,29 +18,32 @@ package network
package client
import java.util.concurrent.Future
-import loadbalancer.{LoadBalancerFactory, LoadBalancer, LoadBalancerFactoryComponent}
-import server.{MessageExecutorComponent, NetworkServer}
-import cluster._
-import network.common._
-import netty.NettyNetworkClient
+
+import com.linkedin.norbert.cluster._
+import com.linkedin.norbert.network.client.loadbalancer.{LoadBalancer, LoadBalancerFactory, LoadBalancerFactoryComponent}
+import com.linkedin.norbert.network.common._
+import com.linkedin.norbert.network.netty.NettyNetworkClient
+import com.linkedin.norbert.network.server.{MessageExecutorComponent, NetworkServer}
+
+import scala.beans.BeanProperty
object NetworkClientConfig {
var defaultIteratorTimeout = NetworkDefaults.DEFAULT_ITERATOR_TIMEOUT;
}
class NetworkClientConfig {
- var clusterClient: ClusterClient = _
- var clientName: String = _
- var serviceName: String = _
- var zooKeeperConnectString: String = _
- var zooKeeperSessionTimeoutMillis = ClusterDefaults.ZOOKEEPER_SESSION_TIMEOUT_MILLIS
+ @BeanProperty var clusterClient: ClusterClient = _
+ @BeanProperty var clientName: String = _
+ @BeanProperty var serviceName: String = _
+ @BeanProperty var zooKeeperConnectString: String = _
+ @BeanProperty var zooKeeperSessionTimeoutMillis = ClusterDefaults.ZOOKEEPER_SESSION_TIMEOUT_MILLIS
- var connectTimeoutMillis = NetworkDefaults.CONNECT_TIMEOUT_MILLIS
- var writeTimeoutMillis = NetworkDefaults.WRITE_TIMEOUT_MILLIS
- var maxConnectionsPerNode = NetworkDefaults.MAX_CONNECTIONS_PER_NODE
+ @BeanProperty var connectTimeoutMillis = NetworkDefaults.CONNECT_TIMEOUT_MILLIS
+ @BeanProperty var writeTimeoutMillis = NetworkDefaults.WRITE_TIMEOUT_MILLIS
+ @BeanProperty var maxConnectionsPerNode = NetworkDefaults.MAX_CONNECTIONS_PER_NODE
- var staleRequestTimeoutMins = NetworkDefaults.STALE_REQUEST_TIMEOUT_MINS
- var staleRequestCleanupFrequenceMins = NetworkDefaults.STALE_REQUEST_CLEANUP_FREQUENCY_MINS
+ @BeanProperty var staleRequestTimeoutMins = NetworkDefaults.STALE_REQUEST_TIMEOUT_MINS
+ @BeanProperty var staleRequestCleanupFrequenceMins = NetworkDefaults.STALE_REQUEST_CLEANUP_FREQUENCY_MINS
/**
* Represents how long a channel stays alive. There are some specifics:
@@ -48,22 +51,22 @@ class NetworkClientConfig {
* closeChannelTimeMillis == 0: Immediately close the channel
* closeChannelTimeMillis > 0: Close the channel after closeChannelTimeMillis
*/
- var closeChannelTimeMillis = NetworkDefaults.CLOSE_CHANNEL_TIMEOUT_MILLIS
+ @BeanProperty var closeChannelTimeMillis = NetworkDefaults.CLOSE_CHANNEL_TIMEOUT_MILLIS
- var requestStatisticsWindow = NetworkDefaults.REQUEST_STATISTICS_WINDOW
+ @BeanProperty var requestStatisticsWindow = NetworkDefaults.REQUEST_STATISTICS_WINDOW
- var outlierMuliplier = NetworkDefaults.OUTLIER_MULTIPLIER
- var outlierConstant = NetworkDefaults.OUTLIER_CONSTANT
+ @BeanProperty var outlierMuliplier = NetworkDefaults.OUTLIER_MULTIPLIER
+ @BeanProperty var outlierConstant = NetworkDefaults.OUTLIER_CONSTANT
- var responseHandlerCorePoolSize = NetworkDefaults.RESPONSE_THREAD_CORE_POOL_SIZE
- var responseHandlerMaxPoolSize = NetworkDefaults.RESPONSE_THREAD_MAX_POOL_SIZE
- var responseHandlerKeepAliveTime = NetworkDefaults.RESPONSE_THREAD_KEEP_ALIVE_TIME_SECS
- var responseHandlerMaxWaitingQueueSize = NetworkDefaults.RESPONSE_THREAD_POOL_QUEUE_SIZE
+ @BeanProperty var responseHandlerCorePoolSize = NetworkDefaults.RESPONSE_THREAD_CORE_POOL_SIZE
+ @BeanProperty var responseHandlerMaxPoolSize = NetworkDefaults.RESPONSE_THREAD_MAX_POOL_SIZE
+ @BeanProperty var responseHandlerKeepAliveTime = NetworkDefaults.RESPONSE_THREAD_KEEP_ALIVE_TIME_SECS
+ @BeanProperty var responseHandlerMaxWaitingQueueSize = NetworkDefaults.RESPONSE_THREAD_POOL_QUEUE_SIZE
- var avoidByteStringCopy = NetworkDefaults.AVOID_BYTESTRING_COPY
- var darkCanaryServiceName: Option[String] = None
- var retryStrategy:Option[RetryStrategy] = None
- var duplicatesOk:Boolean = false
+ @BeanProperty var avoidByteStringCopy = NetworkDefaults.AVOID_BYTESTRING_COPY
+ @BeanProperty var darkCanaryServiceName: Option[String] = None
+ @BeanProperty var retryStrategy:Option[RetryStrategy] = None
+ @BeanProperty var duplicatesOk:Boolean = false
}
object NetworkClient {
diff --git a/network/src/main/scala/com/linkedin/norbert/network/netty/NettyNetworkClient.scala b/network/src/main/scala/com/linkedin/norbert/network/netty/NettyNetworkClient.scala
index db9db508..f574c741 100644
--- a/network/src/main/scala/com/linkedin/norbert/network/netty/NettyNetworkClient.scala
+++ b/network/src/main/scala/com/linkedin/norbert/network/netty/NettyNetworkClient.scala
@@ -24,7 +24,7 @@ import org.jboss.netty.handler.codec.frame.{LengthFieldBasedFrameDecoder, Length
import org.jboss.netty.handler.codec.protobuf.{ProtobufDecoder, ProtobufEncoder}
import java.util.concurrent.Executors
import partitioned.loadbalancer.{PartitionedLoadBalancerFactoryComponent, PartitionedLoadBalancerFactory}
-import partitioned.PartitionedNetworkClient
+import com.linkedin.norbert.network.partitioned.{PartitionedNetworkClientFailOver, PartitionedNetworkClient}
import client.loadbalancer.{LoadBalancerFactoryComponent, LoadBalancerFactory}
import com.linkedin.norbert.cluster.{Node, ClusterClient, ClusterClientComponent}
import protos.NorbertProtos
@@ -157,3 +157,8 @@ class NettyPartitionedNetworkClient[PartitionedId](clientConfig: NetworkClientCo
with PartitionedNetworkClient[PartitionedId] with PartitionedLoadBalancerFactoryComponent[PartitionedId] {
setConfig(clientConfig)
}
+
+class NettyPartitionedFailOverNetworkClient[PartitionedId](clientConfig: NetworkClientConfig, val loadBalancerFactory: PartitionedLoadBalancerFactory[PartitionedId]) extends BaseNettyNetworkClient(clientConfig)
+with PartitionedNetworkClientFailOver[PartitionedId] with PartitionedLoadBalancerFactoryComponent[PartitionedId] {
+ setConfig(clientConfig)
+}
diff --git a/network/src/main/scala/com/linkedin/norbert/network/netty/NettyNetworkServer.scala b/network/src/main/scala/com/linkedin/norbert/network/netty/NettyNetworkServer.scala
index 0dce3db9..1e58855e 100644
--- a/network/src/main/scala/com/linkedin/norbert/network/netty/NettyNetworkServer.scala
+++ b/network/src/main/scala/com/linkedin/norbert/network/netty/NettyNetworkServer.scala
@@ -30,25 +30,27 @@ import protos.NorbertProtos
import norbertutils.NamedPoolThreadFactory
import org.jboss.netty.channel.{Channels, ChannelPipelineFactory}
+import scala.beans.BeanProperty
+
class NetworkServerConfig {
- var clusterClient: ClusterClient = _
- var serviceName: String = _
- var zooKeeperConnectString: String = _
- var zooKeeperSessionTimeoutMillis = 30000
+ @BeanProperty var clusterClient: ClusterClient = _
+ @BeanProperty var serviceName: String = _
+ @BeanProperty var zooKeeperConnectString: String = _
+ @BeanProperty var zooKeeperSessionTimeoutMillis = 30000
- var requestTimeoutMillis = NetworkDefaults.REQUEST_TIMEOUT_MILLIS
- var responseGenerationTimeoutMillis = -1//turned off by default
+ @BeanProperty var requestTimeoutMillis = NetworkDefaults.REQUEST_TIMEOUT_MILLIS
+ @BeanProperty var responseGenerationTimeoutMillis = -1//turned off by default
- var requestThreadCorePoolSize = NetworkDefaults.REQUEST_THREAD_CORE_POOL_SIZE
- var requestThreadMaxPoolSize = NetworkDefaults.REQUEST_THREAD_MAX_POOL_SIZE
- var requestThreadKeepAliveTimeSecs = NetworkDefaults.REQUEST_THREAD_KEEP_ALIVE_TIME_SECS
+ @BeanProperty var requestThreadCorePoolSize = NetworkDefaults.REQUEST_THREAD_CORE_POOL_SIZE
+ @BeanProperty var requestThreadMaxPoolSize = NetworkDefaults.REQUEST_THREAD_MAX_POOL_SIZE
+ @BeanProperty var requestThreadKeepAliveTimeSecs = NetworkDefaults.REQUEST_THREAD_KEEP_ALIVE_TIME_SECS
- var threadPoolQueueSize = NetworkDefaults.REQUEST_THREAD_POOL_QUEUE_SIZE
+ @BeanProperty var threadPoolQueueSize = NetworkDefaults.REQUEST_THREAD_POOL_QUEUE_SIZE
- var requestStatisticsWindow = NetworkDefaults.REQUEST_STATISTICS_WINDOW
- var avoidByteStringCopy = NetworkDefaults.AVOID_BYTESTRING_COPY
+ @BeanProperty var requestStatisticsWindow = NetworkDefaults.REQUEST_STATISTICS_WINDOW
+ @BeanProperty var avoidByteStringCopy = NetworkDefaults.AVOID_BYTESTRING_COPY
- var shutdownPauseMultiplier = NetworkDefaults.SHUTDOWN_PAUSE_MULTIPLIER
+ @BeanProperty var shutdownPauseMultiplier = NetworkDefaults.SHUTDOWN_PAUSE_MULTIPLIER
}
class NettyNetworkServer(serverConfig: NetworkServerConfig) extends NetworkServer with ClusterClientComponent with NettyClusterIoServerComponent
diff --git a/network/src/main/scala/com/linkedin/norbert/network/partitioned/PartitionedNetworkClient.scala b/network/src/main/scala/com/linkedin/norbert/network/partitioned/PartitionedNetworkClient.scala
index 9dad0c7a..f3184eaf 100644
--- a/network/src/main/scala/com/linkedin/norbert/network/partitioned/PartitionedNetworkClient.scala
+++ b/network/src/main/scala/com/linkedin/norbert/network/partitioned/PartitionedNetworkClient.scala
@@ -17,15 +17,20 @@ package com.linkedin.norbert
package network
package partitioned
-import java.util.concurrent.Future
-import common._
-import loadbalancer.{PartitionedLoadBalancer, PartitionedLoadBalancerFactoryComponent, PartitionedLoadBalancerFactory}
-import server.{MessageExecutorComponent, NetworkServer}
-import netty.NettyPartitionedNetworkClient
-import client.NetworkClientConfig
-import cluster.{Node, ClusterDisconnectedException, InvalidClusterException, ClusterClientComponent}
+import java.net.ConnectException
+import java.util.concurrent.{Future, TimeUnit}
+
+import com.linkedin.norbert.cluster.{ClusterClientComponent, ClusterDisconnectedException, InvalidClusterException, Node}
+import com.linkedin.norbert.network.NoNodesAvailableException
+import com.linkedin.norbert.network.client.NetworkClientConfig
+import com.linkedin.norbert.network.common._
+import com.linkedin.norbert.network.netty.NettyPartitionedNetworkClient
+import com.linkedin.norbert.network.partitioned.loadbalancer.{PartitionedLoadBalancer, PartitionedLoadBalancerFactory, PartitionedLoadBalancerFactoryComponent}
+import com.linkedin.norbert.network.server.{MessageExecutorComponent, NetworkServer}
+
+import scala.beans.BeanProperty
+import scala.collection.JavaConversions
import scala.util.Random
-import java.util
object RoutingConfigs {
val defaultRoutingConfigs = new RoutingConfigs(false, false)
@@ -72,7 +77,7 @@ trait PartitionedNetworkClient[PartitionedId] extends BaseNetworkClient {
retryStrategy = config.retryStrategy
}
- @volatile private var loadBalancer: Option[Either[InvalidClusterException, PartitionedLoadBalancer[PartitionedId]]] = None
+ @volatile protected var loadBalancer: Option[Either[InvalidClusterException, PartitionedLoadBalancer[PartitionedId]]] = None
def sendRequest[RequestMsg, ResponseMsg](id: PartitionedId, request: RequestMsg, callback: Either[Throwable, ResponseMsg] => Unit)
(implicit is: InputSerializer[RequestMsg, ResponseMsg], os: OutputSerializer[RequestMsg, ResponseMsg]): Unit =
@@ -768,3 +773,46 @@ trait PartitionedNetworkClient[PartitionedId] extends BaseNetworkClient {
}
}
+
+trait PartitionedNetworkClientFailOver[PartitionedId] extends PartitionedNetworkClient[PartitionedId] {
+
+ this: ClusterClientComponent with ClusterIoClientComponent with PartitionedLoadBalancerFactoryComponent[PartitionedId] =>
+
+ val failOverAttempts:Int = 1
+
+ override def sendRequest[RequestMsg, ResponseMsg](id: PartitionedId, request: RequestMsg, callback: Either[Throwable, ResponseMsg] => Unit, capability: Option[Long], persistentCapability: Option[Long])
+ (implicit is: InputSerializer[RequestMsg, ResponseMsg], os: OutputSerializer[RequestMsg, ResponseMsg]): Unit = doIfConnected {
+ if (id == null || request == null) throw new NullPointerException
+
+ val nodes = loadBalancer.getOrElse(throw new ClusterDisconnectedException).fold(ex => throw ex,
+ lb => lb.nextNodes(id, capability, persistentCapability))
+
+ if (nodes.isEmpty) {
+ throw new NoNodesAvailableException("Unable to satisfy request, no node available for id %s".format(id))
+ } else {
+ val nodeList:List[Node] = List() ++ JavaConversions.asScalaIterator(nodes.iterator())
+ doSendRequest(nodeList, 0, id, request, callback, capability, persistentCapability)
+ }
+ }
+
+ def doSendRequest[RequestMsg, ResponseMsg](nodes:List[Node], nodeIndex:Int, id: PartitionedId, request: RequestMsg, callback: Either[Throwable, ResponseMsg] => Unit, capability: Option[Long], persistentCapability: Option[Long])
+ (implicit is: InputSerializer[RequestMsg, ResponseMsg], os: OutputSerializer[RequestMsg, ResponseMsg]): Unit = {
+
+ val node:Node = nodes(nodeIndex)
+
+ val failOverCallback = (e:Either[Throwable, ResponseMsg]) => {
+ if ((nodeIndex+1 < nodes.length) && nodeIndex < failOverAttempts) {
+ e match {
+ case Left(ex:ConnectException) => doSendRequest(nodes, nodeIndex + 1, id, request, callback, capability, persistentCapability);
+ case Left(ex:Throwable) => callback.apply(e);
+ case Right(r:ResponseMsg) => callback.apply(e);
+ }
+ } else {
+ callback.apply(e)
+ }
+ () // force unit return type
+ }
+ doSendRequest(PartitionedRequest(request, node, Set(id), (node: Node, ids: Set[PartitionedId]) => request, is, os, Option(failOverCallback)))
+ }
+
+}
\ No newline at end of file
diff --git a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DefaultClusteredLoadBalancerFactory.scala b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DefaultClusteredLoadBalancerFactory.scala
index e0588361..3ee777de 100644
--- a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DefaultClusteredLoadBalancerFactory.scala
+++ b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DefaultClusteredLoadBalancerFactory.scala
@@ -18,10 +18,13 @@ package network
package partitioned
package loadbalancer
-import logging.Logging
-import cluster.{Node, InvalidClusterException}
-import common.Endpoint
+import java.util
import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger}
+
+import com.linkedin.norbert.cluster.{InvalidClusterException, Node}
+import com.linkedin.norbert.logging.Logging
+import com.linkedin.norbert.network.common.Endpoint
+
import scala.util.Random
import scala.util.control.Breaks._
@@ -275,6 +278,15 @@ abstract class DefaultClusteredLoadBalancerFactory[PartitionedId](numPartitions:
}
}
}
+
+ /**
+ * Returns the consistent ordered set of nodes to which messages should be routed; the order is based on the PartitionId provided.
+ *
+ * @param id the id based on which the order of the nodes will be determined
+ * @return an ordered set of nodes
+ */
+ override def nextNodes(id: PartitionedId, capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[Node] =
+ nodesForPartition(partitionForId(id), capability, persistentCapability)
}
/**
diff --git a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DefaultLoadBalancerHelper.scala b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DefaultLoadBalancerHelper.scala
index a304b1b4..fa4e1274 100644
--- a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DefaultLoadBalancerHelper.scala
+++ b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DefaultLoadBalancerHelper.scala
@@ -18,12 +18,13 @@ package network
package partitioned
package loadbalancer
-import cluster.{InvalidClusterException, Node}
-import common.Endpoint
+import java.util
import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger}
-import annotation.tailrec
-import client.loadbalancer.LoadBalancerHelpers
-import logging.Logging
+
+import com.linkedin.norbert.cluster.{InvalidClusterException, Node}
+import com.linkedin.norbert.logging.Logging
+import com.linkedin.norbert.network.client.loadbalancer.LoadBalancerHelpers
+import com.linkedin.norbert.network.common.Endpoint
/**
* A mixin trait that provides functionality to help implement a hash based Router.
@@ -83,7 +84,6 @@ trait DefaultLoadBalancerHelper extends LoadBalancerHelpers with Logging {
case None =>
return None
case Some((endpoints, counter, states)) =>
- import math._
val es = endpoints.size
counter.compareAndSet(java.lang.Integer.MAX_VALUE, 0)
val idx = counter.getAndIncrement
@@ -106,6 +106,32 @@ trait DefaultLoadBalancerHelper extends LoadBalancerHelpers with Logging {
}
}
+ protected def nodesForPartition(partitionId: Int, capability: Option[Long] = None, persistentCapability: Option[Long] = None): util.LinkedHashSet[Node] = {
+ partitionToNodeMap.get(partitionId) match {
+ case None =>
+ return new util.LinkedHashSet[Node]
+ case Some((endpoints, counter, states)) =>
+ val es = endpoints.size
+ counter.compareAndSet(java.lang.Integer.MAX_VALUE, 0)
+ val idx = counter.getAndIncrement
+ var i = idx
+ var loopCount = 0
+ val result = new util.LinkedHashSet[Node]
+ do {
+ val endpoint = endpoints(i % es)
+ if(endpoint.canServeRequests && endpoint.node.isCapableOf(capability, persistentCapability)) {
+ result.add(endpoint.node)
+ }
+
+ i = i + 1
+ if (i < 0) i = 0
+ loopCount = loopCount + 1
+ } while (loopCount <= es)
+
+ result
+ }
+ }
+
def compensateCounter(idx: Int, count:Int, counter:AtomicInteger) {
if (idx + 1 + count <= 0) {
// Integer overflow
diff --git a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DefaultPartitionedLoadBalancerFactory.scala b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DefaultPartitionedLoadBalancerFactory.scala
index 2a2c4df9..39b91df1 100644
--- a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DefaultPartitionedLoadBalancerFactory.scala
+++ b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DefaultPartitionedLoadBalancerFactory.scala
@@ -18,11 +18,13 @@ package network
package partitioned
package loadbalancer
-import logging.Logging
-import cluster.{Node, InvalidClusterException}
-import common.Endpoint
+import java.util
import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger}
-import scala.util.Random
+
+import com.linkedin.norbert.cluster.{InvalidClusterException, Node}
+import com.linkedin.norbert.logging.Logging
+import com.linkedin.norbert.network.common.Endpoint
+
import scala.util.control.Breaks._
/**
@@ -55,7 +57,7 @@ abstract class DefaultPartitionedLoadBalancerFactory[PartitionedId](numPartition
partitionToNodeMap.keys.foldLeft(Map.empty[Node, Set[Int]]) { (map, partition) =>
val nodeOption = nodeForPartition(partition, capability, persistentCapability)
if(nodeOption.isDefined) {
- val n = nodeOption.get
+ val n = nodeOption.iterator.next()
map + (n -> (map.getOrElse(n, Set.empty[Int]) + partition))
} else if(serveRequestsIfPartitionMissing) {
log.warn("Partition %s is unavailable, attempting to continue serving requests to other partitions.".format(partition))
@@ -96,7 +98,6 @@ abstract class DefaultPartitionedLoadBalancerFactory[PartitionedId](numPartition
case None =>
break
case Some((endpoints, counter, states)) =>
- import math._
val es = endpoints.size
counter.compareAndSet(java.lang.Integer.MAX_VALUE, 0)
val idx = counter.getAndIncrement % es
@@ -152,6 +153,15 @@ abstract class DefaultPartitionedLoadBalancerFactory[PartitionedId](numPartition
}
}
}
+
+ /**
+ * Returns the consistent ordered set of nodes to which messages should be routed; the order is based on the PartitionId provided.
+ *
+ * @param id the id based on which the order of the nodes will be determined
+ * @return an ordered set of nodes
+ */
+ override def nextNodes(id: PartitionedId, capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[Node] =
+ nodesForPartition(partitionForId(id), capability, persistentCapability)
}
/**
diff --git a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DirectPartitionedLoadBalancer.scala b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DirectPartitionedLoadBalancer.scala
new file mode 100644
index 00000000..5483c358
--- /dev/null
+++ b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DirectPartitionedLoadBalancer.scala
@@ -0,0 +1,110 @@
+package com.linkedin.norbert.network.partitioned.loadbalancer
+
+import java.util
+
+import com.linkedin.norbert.cluster.{InvalidClusterException, Node}
+import com.linkedin.norbert.network.common.Endpoint
+
+import scala.collection.immutable.HashSet
+import scala.collection.mutable
+
+/**
+ * A partition key scheme which allows the client to specify which partition a request should go to.
+ *
+ * @param partitionId specifies the target partition
+ * @param requestKey used for further routing within the partition (e.g. consistent hashing within the partition)
+ * @tparam KeyType the type of the secondary key
+ */
+case class PartitionKey[KeyType](partitionId: Int, requestKey: KeyType) {
+
+}
+
+trait PartitionFallBackStrategy {
+ def nextPartition(currentPartition: Int): Int
+}
+
+class DefaultPartitionFallbackStrategy(defaultPartition: Int) extends PartitionFallBackStrategy {
+ override def nextPartition(currentPartition: Int): Int = defaultPartition
+}
+
+
+/**
+ * A load balancer which allows the client to specify which partition a request should go to.
+ *
+ * The request is then routed to the load balancers of the appropriate partition.
+ *
+ * @param delegateLoadBalanders a map of partitionId -> load balancer delegate
+ *
+ * @tparam KeyType the type of the secondary key part
+ */
+class DirectPartitionedLoadBalancer[KeyType](delegateLoadBalanders: Map[Int, PartitionedLoadBalancer[KeyType]],
+ fallbackStrategy: PartitionFallBackStrategy)
+ extends PartitionedLoadBalancer[PartitionKey[KeyType]] {
+
+ /**
+ * Retrieve the load balancer, or fall back to a different partition, if no load balancer exists for the
+ * current partition.
+ *
+ * If no load balancer can be found, throws an IllegalStateException
+ */
+ def getLoadBalancer(partitionId: Int): PartitionedLoadBalancer[KeyType] = {
+ var lb = delegateLoadBalanders.get(partitionId)
+ if (lb == None) {
+ val nextPartitionId = fallbackStrategy.nextPartition(partitionId)
+ if (nextPartitionId != partitionId) {
+ // fallback once
+ lb = delegateLoadBalanders.get(nextPartitionId)
+ }
+ }
+ if (lb == None) {
+ throw new IllegalStateException("No load-balancer for partition " + partitionId)
+ }
+ lb.get
+ }
+
+ override def nextNode(id: PartitionKey[KeyType], capability: Option[Long], persistentCapability: Option[Long]): Option[Node] =
+ getLoadBalancer(id.partitionId).nextNode(id.requestKey, capability, persistentCapability)
+
+ override def nodesForOneReplica(id: PartitionKey[KeyType], capability: Option[Long], persistentCapability: Option[Long]): Map[Node, Set[Int]] =
+ getLoadBalancer(id.partitionId).nodesForOneReplica(id.requestKey, capability, persistentCapability)
+
+ override def nodesForPartitions(id: PartitionKey[KeyType], partitions: Set[Int], capability: Option[Long], persistentCapability: Option[Long]): Map[Node, Set[Int]] =
+ throw new UnsupportedOperationException("Not implemented yet")
+
+ override def nodesForPartitionedId(id: PartitionKey[KeyType], capability: Option[Long], persistentCapability: Option[Long]): Set[Node] =
+ getLoadBalancer(id.partitionId).nodesForPartitionedId(id.requestKey, capability, persistentCapability)
+
+ override def nextNodes(id: PartitionKey[KeyType], capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[Node] =
+ getLoadBalancer(id.partitionId).nextNodes(id.requestKey, capability, persistentCapability)
+}
+
+class DirectPartitionedLoadBalancerFactory[KeyType](delegateFactory: PartitionedLoadBalancerFactory[KeyType],
+ fallbackStrategy: PartitionFallBackStrategy) extends PartitionedLoadBalancerFactory[PartitionKey[KeyType]] {
+
+ @throws(classOf[InvalidClusterException])
+ override def newLoadBalancer(nodes: Set[Endpoint]): PartitionedLoadBalancer[PartitionKey[KeyType]] = {
+ val map = mutable.HashMap[Int, Set[Endpoint]]()
+ nodes.foreach((e: Endpoint) => {
+ e.node.partitionIds.foreach((i: Int) => {
+ val setOpt = map.get(i)
+ if (setOpt == None) {
+ val set: Set[Endpoint] = new HashSet[Endpoint]() + e
+ map += (i -> set)
+ } else {
+ val set: Set[Endpoint] = setOpt.get + e
+ map += (i -> set)
+ }
+ })
+ })
+
+ // build an immutable map from partition id to its delegate load balancer
+ val delegates : Map[Int,PartitionedLoadBalancer[KeyType]] = Map() ++ map.mapValues(v => delegateFactory.newLoadBalancer( v ))
+ new DirectPartitionedLoadBalancer[KeyType](delegates, fallbackStrategy)
+ }
+
+ override def getNumPartitions(endpoints: Set[Endpoint]): Int = {
+ val partitionIds = endpoints.map((x: Endpoint) => x.node.partitionIds)
+ val flat = new HashSet[Int] ++ partitionIds
+ flat.size
+ }
+}
\ No newline at end of file
diff --git a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/HashPartitionedLoadBalancerFactory.scala b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/HashPartitionedLoadBalancerFactory.scala
new file mode 100644
index 00000000..c114936f
--- /dev/null
+++ b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/HashPartitionedLoadBalancerFactory.scala
@@ -0,0 +1,30 @@
+package com.linkedin.norbert.network.partitioned.loadbalancer
+
+
+/**
+ * A very simple adapter of a load balancer factory that provides a default hashCode based implementation
+ * for hash calculations.
+ *
+ * Suitable for use within Java classes.
+ *
+ * @see com.linkedin.norbert.javacompat.network.ScalaLbfToJavaLbf
+ */
+class HashPartitionedLoadBalancerFactory[PartitionedId](numPartitions: Int,
+ numReplicas: Int,
+ hashFn: PartitionedId => Int,
+ endpointHashFn: String => Int,
+ serveRequestsIfPartitionMissing: Boolean)
+ extends PartitionedConsistentHashedLoadBalancerFactory[PartitionedId](
+ numPartitions: Int,
+ numReplicas: Int,
+ hashFn: PartitionedId => Int,
+ endpointHashFn: String => Int,
+ serveRequestsIfPartitionMissing: Boolean) {
+ def this( numReplicas: Int) = {
+ this(-1, numReplicas, (p: PartitionedId) => p.hashCode, (p: String) => p.hashCode, true)
+ }
+
+ def this( numPartitions: Int, numReplicas: Int) = {
+ this(numPartitions, numReplicas, (p: PartitionedId) => p.hashCode, (p: String) => p.hashCode, true)
+ }
+}
diff --git a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedConsistentHashedLoadBalancerFactory.scala b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedConsistentHashedLoadBalancerFactory.scala
index e6cb66e3..eaaedd30 100644
--- a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedConsistentHashedLoadBalancerFactory.scala
+++ b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedConsistentHashedLoadBalancerFactory.scala
@@ -1,12 +1,11 @@
package com.linkedin.norbert.network.partitioned.loadbalancer
-import com.linkedin.norbert.network.common.Endpoint
-import java.util.TreeMap
-import com.linkedin.norbert.cluster.{Node, InvalidClusterException}
-import com.linkedin.norbert.logging.Logging
-import com.linkedin.norbert.network.client.loadbalancer.LoadBalancerHelpers
+import java.util
import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger}
+import com.linkedin.norbert.cluster.{InvalidClusterException, Node}
+import com.linkedin.norbert.network.common.Endpoint
+
/*
* Copyright 2009-2010 LinkedIn, Inc
*
@@ -59,7 +58,7 @@ class PartitionedConsistentHashedLoadBalancerFactory[PartitionedId](numPartition
}
val wheels = partitions.map { case (partition, endpointsForPartition) =>
- val wheel = new TreeMap[Int, Endpoint]
+ val wheel = new util.TreeMap[Int, Endpoint]
endpointsForPartition.foreach { endpoint =>
var r = 0
while (r < numReplicas) {
@@ -78,12 +77,15 @@ class PartitionedConsistentHashedLoadBalancerFactory[PartitionedId](numPartition
}
}
-class PartitionedConsistentHashedLoadBalancer[PartitionedId](numPartitions: Int, wheels: Map[Int, TreeMap[Int, Endpoint]], hashFn: PartitionedId => Int, serveRequestsIfPartitionMissing: Boolean = true)
+class PartitionedConsistentHashedLoadBalancer[PartitionedId](numPartitions: Int, wheels: Map[Int, util.TreeMap[Int, Endpoint]], hashFn: PartitionedId => Int, serveRequestsIfPartitionMissing: Boolean = true)
extends PartitionedLoadBalancer[PartitionedId] with DefaultLoadBalancerHelper {
import scala.collection.JavaConversions._
val endpoints = wheels.values.flatMap(_.values).toSet
val partitionToNodeMap = generatePartitionToNodeMap(endpoints, numPartitions, serveRequestsIfPartitionMissing)
val partitionIds = wheels.keySet.toSet
+ val treeWheels = new util.TreeMap[Int, util.TreeMap[Int, Endpoint]]()
+ treeWheels.putAll(wheels)
+ val wheelSize = treeWheels.size()
def nodesForOneReplica(id: PartitionedId, capability: Option[Long] = None, persistentCapability: Option[Long] = None) = {
nodesForPartitions(id, wheels, capability, persistentCapability)
@@ -91,15 +93,20 @@ class PartitionedConsistentHashedLoadBalancer[PartitionedId](numPartitions: Int,
def nodesForPartitionedId(id: PartitionedId, capability: Option[Long] = None, persistentCapability: Option[Long] = None) = {
val hash = hashFn(id)
- val partitionId = hash.abs % numPartitions
- wheels.get(partitionId).flatMap { wheel => Option(wheel.foldLeft(Set.empty[Node]) { case (set, (p, e)) => if (e.node.isCapableOf(capability, persistentCapability)) set + e.node else set }) }.get
+ val partitionId = hash.abs % wheelSize
+ val entry = PartitionUtil.wheelEntry(treeWheels, partitionId)
+ if (entry == null) {
+ Set.empty[Node]
+ } else {
+ Option(entry.getValue).flatMap { wheel => Option(wheel.foldLeft(Set.empty[Node]) { case (set, (p, e)) => if (e.node.isCapableOf(capability, persistentCapability)) set + e.node else set }) }.get
+ }
}
def nodesForPartitions(id: PartitionedId, partitions: Set[Int], capability: Option[Long] = None, persistentCapability: Option[Long] = None) = {
nodesForPartitions(id, wheels.filterKeys(partitions contains _), capability, persistentCapability)
}
- private def nodesForPartitions(id: PartitionedId, wheels: Map[Int, TreeMap[Int, Endpoint]], capability: Option[Long], persistentCapability: Option[Long]) = {
+ private def nodesForPartitions(id: PartitionedId, wheels: Map[Int, util.TreeMap[Int, Endpoint]], capability: Option[Long], persistentCapability: Option[Long]) = {
if (id == null) {
nodesForPartitions0(partitionToNodeMap filterKeys wheels.containsKey, capability, persistentCapability)
} else {
@@ -131,7 +138,7 @@ class PartitionedConsistentHashedLoadBalancer[PartitionedId](numPartitions: Int,
private def nodesForPartitions0(partitionToNodeMap: Map[Int, (IndexedSeq[Endpoint], AtomicInteger, Array[AtomicBoolean])], capability: Option[Long], persistentCapability: Option[Long] = None) = {
partitionToNodeMap.keys.foldLeft(Map.empty[Node, Set[Int]]) { (map, partition) =>
val nodeOption = nodeForPartition(partition, capability, persistentCapability)
- if(nodeOption isDefined) {
+ if(nodeOption.isDefined) {
val n = nodeOption.get
map + (n -> (map.getOrElse(n, Set.empty[Int]) + partition))
} else if(serveRequestsIfPartitionMissing) {
@@ -141,14 +148,45 @@ class PartitionedConsistentHashedLoadBalancer[PartitionedId](numPartitions: Int,
throw new InvalidClusterException("Partition %s is unavailable, cannot serve requests.".format(partition))
}
}
-
+
+ override def nextNodes(id: PartitionedId, capability: Option[Long] = None, persistentCapability: Option[Long] = None): util.LinkedHashSet[Node] = {
+ val result = new util.LinkedHashSet[Node]()
+ val hash = hashFn(id)
+ val partitionId = hash.abs % wheelSize
+ val innerMapEntry = PartitionUtil.wheelEntry(treeWheels, partitionId)
+ if (innerMapEntry == null) {
+ result
+ } else {
+ val innerMapOpt = Option(innerMapEntry.getValue)
+ if (innerMapOpt.isDefined) {
+ val innerMap = innerMapOpt.get
+ val startEntry = PartitionUtil.wheelEntry(innerMap, hash)
+ if (startEntry != null) {
+ result.add(startEntry.getValue.node)
+ var nextEntry = PartitionUtil.rotateWheel(innerMap, startEntry.getKey)
+ while (nextEntry != startEntry) {
+ result.add(nextEntry.getValue.node)
+ nextEntry = PartitionUtil.rotateWheel(innerMap, nextEntry.getKey)
+ }
+ return result
+ }
+ }
+ log.warn("Failed to find mapping for %s, expect routing failures".format(id))
+ result
+ }
+ }
def nextNode(id: PartitionedId, capability: Option[Long] = None, persistentCapability: Option[Long] = None): Option[Node] = {
val hash = hashFn(id)
- val partitionId = hash.abs % numPartitions
- wheels.get(partitionId).flatMap { wheel =>
- PartitionUtil.searchWheel(wheel, hash, (e: Endpoint) => e.canServeRequests && e.node.isCapableOf(capability, persistentCapability) )
- }.map(_.node)
+ val partitionId = hash.abs % wheelSize
+ val innerMapEntry = PartitionUtil.wheelEntry(treeWheels, partitionId)
+ if (innerMapEntry == null) {
+ None
+ } else {
+ Option(innerMapEntry.getValue).flatMap { wheel =>
+ PartitionUtil.searchWheel(wheel, hash, (e: Endpoint) => e.canServeRequests && e.node.isCapableOf(capability, persistentCapability) )
+ }.map(_.node)
+ }
}
def partitionForId(id: PartitionedId): Int = {
diff --git a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedLoadBalancerFactory.scala b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedLoadBalancerFactory.scala
index ebcf1e1a..f1ed11ec 100644
--- a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedLoadBalancerFactory.scala
+++ b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedLoadBalancerFactory.scala
@@ -19,15 +19,27 @@ package partitioned
package loadbalancer
+import java.util
+
+import com.linkedin.norbert.cluster.{InvalidClusterException, Node}
+import com.linkedin.norbert.network.common.Endpoint
+
import _root_.scala.Predef._
-import cluster.{InvalidClusterException, Node}
-import common.Endpoint
/**
* A PartitionedLoadBalancer handles calculating the next Node a message should be routed to
* based on a PartitionedId.
*/
trait PartitionedLoadBalancer[PartitionedId] {
+
+ /**
+ * Returns the consistent ordered set of nodes to which messages should be routed; the order is based on the PartitionedId provided.
+ *
+ * @param id the id based on which the order of the nodes will be determined
+ * @return an ordered set of nodes
+ */
+ def nextNodes(id: PartitionedId, capability: Option[Long] = None, persistentCapability: Option[Long] = None): util.LinkedHashSet[Node]
+
/**
* Returns the next Node a message should be routed to based on the PartitionId provided.
*
diff --git a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/SimpleConsistentHashedLoadBalancerFactory.scala b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/SimpleConsistentHashedLoadBalancerFactory.scala
index 87c1fd9f..6dbafed2 100644
--- a/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/SimpleConsistentHashedLoadBalancerFactory.scala
+++ b/network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/SimpleConsistentHashedLoadBalancerFactory.scala
@@ -19,9 +19,11 @@ package network
package partitioned
package loadbalancer
-import common.Endpoint
+import java.util
import java.util.TreeMap
-import cluster.{Node, InvalidClusterException}
+
+import com.linkedin.norbert.cluster.{InvalidClusterException, Node}
+import com.linkedin.norbert.network.common.Endpoint
/**
* This load balancer is appropriate when any server could handle the request. In this case, the partitions don't really mean anything. They simply control a percentage of the requests
@@ -63,4 +65,6 @@ class SimpleConsistentHashedLoadBalancer[PartitionedId](wheel: TreeMap[Int, Endp
def nextNode(id: PartitionedId, capability: Option[Long], persistentCapability: Option[Long]): Option[Node] = {
PartitionUtil.searchWheel(wheel, hashFn(id), (e: Endpoint) => e.canServeRequests && e.node.isCapableOf(capability, persistentCapability)).map(_.node)
}
+
+ override def nextNodes(id: PartitionedId, capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[Node] = throw new UnsupportedOperationException
}
diff --git a/network/src/main/scala/com/linkedin/norbert/network/util/ProtoUtils.scala b/network/src/main/scala/com/linkedin/norbert/network/util/ProtoUtils.scala
index 3148af81..8ebdc069 100644
--- a/network/src/main/scala/com/linkedin/norbert/network/util/ProtoUtils.scala
+++ b/network/src/main/scala/com/linkedin/norbert/network/util/ProtoUtils.scala
@@ -2,7 +2,7 @@ package com.linkedin.norbert
package network
package util
-import com.google.protobuf.ByteString
+import com.google.protobuf.{ByteStringUtils, ByteString, LiteralByteString}
import logging.Logging
import java.lang.reflect.{Field, Constructor}
@@ -12,23 +12,13 @@ import java.lang.reflect.{Field, Constructor}
* to bypass those.
*/
object ProtoUtils extends Logging {
- private val byteStringConstructor: Constructor[ByteString] = try {
- val c = classOf[ByteString].getDeclaredConstructor(classOf[Array[Byte]])
- c.setAccessible(true)
- c
- } catch {
- case ex: Exception =>
- log.warn(ex, "Cannot eliminate a copy when converting a byte[] to a ByteString")
- null
- }
-
private val byteStringField: Field = try {
val f = classOf[ByteString].getDeclaredField("bytes")
f.setAccessible(true)
f
} catch {
case ex: Exception =>
- log.warn(ex, "Cannot eliminate a copy when converting a ByteString to a byte[]")
+ log.info(ex, "Cannot eliminate a copy when converting a ByteString to a byte[]")
null
}
@@ -47,16 +37,7 @@ object ProtoUtils extends Logging {
}
private final def fastByteArrayToByteString(byteArray: Array[Byte]): ByteString = {
- if(byteStringConstructor != null)
- try {
- byteStringConstructor.newInstance(byteArray)
- } catch {
- case ex: Exception =>
- log.warn(ex, "Encountered exception invoking the private ByteString constructor, falling back to safe method")
- slowByteArrayToByteString(byteArray)
- }
- else
- slowByteArrayToByteString(byteArray)
+ ByteStringUtils.wrap(byteArray)
}
private final def slowByteArrayToByteString(byteArray: Array[Byte]): ByteString = {
diff --git a/network/src/test/scala/com/linkedin/norbert/network/partitioned/PartitionedNetworkClientSpec.scala b/network/src/test/scala/com/linkedin/norbert/network/partitioned/PartitionedNetworkClientSpec.scala
index fc0bcf30..0499d016 100644
--- a/network/src/test/scala/com/linkedin/norbert/network/partitioned/PartitionedNetworkClientSpec.scala
+++ b/network/src/test/scala/com/linkedin/norbert/network/partitioned/PartitionedNetworkClientSpec.scala
@@ -17,12 +17,15 @@ package com.linkedin.norbert
package network
package partitioned
-import common.{Endpoint, ClusterIoClientComponent, BaseNetworkClientSpecification}
-import loadbalancer._
+import java.net.ConnectException
+import java.util
import java.util.concurrent.ExecutionException
-import cluster.{Node, InvalidClusterException, ClusterDisconnectedException, ClusterClientComponent}
-import scala.Left
-import scala.Some
+
+import com.linkedin.norbert.cluster._
+import com.linkedin.norbert.network.common.{ClusterIoClientComponent, BaseNetworkClientSpecification}
+import com.linkedin.norbert.network.partitioned.loadbalancer._
+
+import scala.collection.JavaConversions
class PartitionedNetworkClientSpec extends BaseNetworkClientSpecification {
val networkClient = new PartitionedNetworkClient[Int] with ClusterClientComponent with ClusterIoClientComponent
@@ -33,6 +36,7 @@ class PartitionedNetworkClientSpec extends BaseNetworkClientSpecification {
def nodesForOneReplica(id: Int, capability: Option[Long], permanentCapability: Option[Long]) = lb.nodesForOneReplica(id, capability, permanentCapability)
def nodesForPartitionedId(id: Int, capability: Option[Long], permanentCapability: Option[Long]) = lb.nodesForPartitionedId(id, capability, permanentCapability)
def nodesForPartitions(id: Int, partitions: Set[Int], capability: Option[Long], permanentCapability: Option[Long]) = lb.nodesForPartitions(id, partitions, capability, permanentCapability)
+ override def nextNodes(id: Int, capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[Node] = lb.nextNodes(id, capability, persistentCapability)
}
val loadBalancerFactory = mock[PartitionedLoadBalancerFactory[Int]]
val clusterIoClient = mock[ClusterIoClient]
@@ -707,6 +711,7 @@ class PartitionedNetworkClientSpec extends BaseNetworkClientSpecification {
def nodesForOneReplica(id: Int, c: Option[Long] = None, pc: Option[Long] = None) = null
def nodesForPartitionedId(id:Int, c: Option[Long] = None, pc: Option[Long] = None) = null
def nodesForPartitions(id: Int, partitions: Set[Int], c: Option[Long] = None, pc: Option[Long] = None) = null
+ def nextNodes(id: Int, capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[Node] = null
}
val loadBalancerFactory = mock[PartitionedLoadBalancerFactory[Int]]
val clusterIoClient = new ClusterIoClient {
@@ -753,6 +758,7 @@ class PartitionedNetworkClientSpec extends BaseNetworkClientSpecification {
def nodesForOneReplica(id: Int, c: Option[Long] = None, pc: Option[Long] = None) = null
def nodesForPartitionedId(id:Int, c: Option[Long] = None, pc: Option[Long] = None) = null
def nodesForPartitions(id: Int, partitions: Set[Int], c: Option[Long] = None, pc: Option[Long] = None)= null
+ def nextNodes(id: Int, capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[Node] = null
}
val loadBalancerFactory = mock[PartitionedLoadBalancerFactory[Int]]
val clusterIoClient = new ClusterIoClient {
@@ -801,6 +807,8 @@ class PartitionedNetworkClientSpec extends BaseNetworkClientSpecification {
def nodesForOneReplica(id: Int, c: Option[Long] = None, pc: Option[Long] = None) = null
def nodesForPartitionedId(id:Int, c: Option[Long] = None, pc: Option[Long] = None) = null
def nodesForPartitions(id: Int, partitions: Set[Int], c: Option[Long] = None, pc: Option[Long] = None) = null
+ def nextNodes(id: Int, capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[Node] = null
+
}
val loadBalancerFactory = mock[PartitionedLoadBalancerFactory[Int]]
val clusterIoClient = new ClusterIoClient {
@@ -850,6 +858,8 @@ class PartitionedNetworkClientSpec extends BaseNetworkClientSpecification {
def nodesForOneReplica(id: Int, c: Option[Long] = None, pc: Option[Long] = None) = null
def nodesForPartitionedId(id:Int, c: Option[Long] = None, pc: Option[Long] = None) = null
def nodesForPartitions(id: Int, partitions: Set[Int], c: Option[Long] = None, pc: Option[Long] = None) = null
+ def nextNodes(id: Int, capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[Node] = null
+
}
val loadBalancerFactory = mock[PartitionedLoadBalancerFactory[Int]]
val clusterIoClient = new ClusterIoClient {
@@ -885,6 +895,7 @@ class PartitionedNetworkClientSpec extends BaseNetworkClientSpecification {
def nodesForOneReplica(id: Int, c: Option[Long] = None, pc: Option[Long] = None) = null
def nodesForPartitionedId(id:Int, c: Option[Long] = None, pc: Option[Long] = None) = null
def nodesForPartitions(id: Int, partitions: Set[Int], c: Option[Long] = None, pc: Option[Long] = None) = null
+ def nextNodes(id: Int, capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[Node] = null
}
val loadBalancerFactory = mock[PartitionedLoadBalancerFactory[Int]]
val clusterIoClient = new ClusterIoClient {
@@ -919,6 +930,8 @@ class PartitionedNetworkClientSpec extends BaseNetworkClientSpecification {
def nodesForOneReplica(id: Int, c: Option[Long] = None, pc: Option[Long] = None) = null
def nodesForPartitionedId(id:Int, c: Option[Long] = None, pc: Option[Long] = None) = null
def nodesForPartitions(id: Int, partitions: Set[Int], c: Option[Long] = None, pc: Option[Long] = None) = null
+ def nextNodes(id: Int, capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[Node] = null
+
}
val loadBalancerFactory = mock[PartitionedLoadBalancerFactory[Int]]
val clusterIoClient = new ClusterIoClient {
@@ -1004,5 +1017,172 @@ class PartitionedNetworkClientSpec extends BaseNetworkClientSpecification {
}
}
+ "PartitionedNetworkClientFailOver" should {
+ "fail-over to next node" in {
+ val nc2 = new PartitionedNetworkClientFailOver[Int] with ClusterClientComponent with ClusterIoClientComponent with PartitionedLoadBalancerFactoryComponent[Int] {
+ val lb = new PartitionedLoadBalancer[Int] {
+ var iter = PartitionedNetworkClientSpec.this.nodes.iterator
+ def nextNode(id: Int, c: Option[Long] = None, pc : Option[Long] = None) = {
+ if (!iter.hasNext ) iter = PartitionedNetworkClientSpec.this.nodes.iterator
+ Some(iter.next)
+ }
+ def nodesForOneReplica(id: Int, c: Option[Long] = None, pc: Option[Long] = None) = null
+ def nodesForPartitionedId(id:Int, c: Option[Long] = None, pc: Option[Long] = None) = null
+ def nodesForPartitions(id: Int, partitions: Set[Int], c: Option[Long] = None, pc: Option[Long] = None) = null
+ def nextNodes(id: Int, capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[Node] = {
+ val result : util.LinkedHashSet[Node] = new util.LinkedHashSet()
+ result.addAll(JavaConversions.asJavaCollection( PartitionedNetworkClientSpec.this.nodes))
+ result
+ }
+ }
+ val loadBalancerFactory = mock[PartitionedLoadBalancerFactory[Int]]
+ val clusterIoClient = new ClusterIoClient {
+ var invocationMap = Map(1 -> 0, 2 -> 0, 3 -> 0)
+ def sendMessage[RequestMsg, ResponseMsg](node: Node, requestCtx: Request[RequestMsg, ResponseMsg]) {
+ val oldVal = invocationMap(node.id)
+ invocationMap = invocationMap + (node.id -> (oldVal+1))
+ if (node.id == 1) {
+ requestCtx.onFailure(new ConnectException with RequestAccess[Request[RequestMsg, ResponseMsg]] {
+ def request = requestCtx
+ })
+ } else {
+ requestCtx.onSuccess(requestCtx.outputSerializer.requestToBytes(requestCtx.message))
+ }
+ }
+ def nodesChanged(nodes: Set[Node]) = {PartitionedNetworkClientSpec.this.endpoints}
+ def shutdown {}
+ }
+ val clusterClient = PartitionedNetworkClientSpec.this.clusterClient
+ }
+ nc2.clusterClient.nodes returns nodeSet
+ nc2.clusterClient.isConnected returns true
+ nc2.loadBalancerFactory.newLoadBalancer(endpoints) returns nc2.lb
+ nc2.start
+
+ // check pre-test assumptions
+ nc2.clusterIoClient.invocationMap(1) must be_==(0)
+ nc2.clusterIoClient.invocationMap(2) must be_==(0)
+ nc2.clusterIoClient.invocationMap(3) must be_==(0)
+
+ nc2.sendRequest[Ping, Ping](0, request)
+
+ // check post-test values
+ nc2.clusterIoClient.invocationMap(1) must be_==(1)
+ nc2.clusterIoClient.invocationMap(2) must be_==(1)
+ nc2.clusterIoClient.invocationMap(3) must be_==(0)
+
+ }
+
+ "fail when the fail over node fails" in {
+ val nc2 = new PartitionedNetworkClientFailOver[Int] with ClusterClientComponent with ClusterIoClientComponent with PartitionedLoadBalancerFactoryComponent[Int] {
+ val lb = new PartitionedLoadBalancer[Int] {
+ var iter = PartitionedNetworkClientSpec.this.nodes.iterator
+ def nextNode(id: Int, c: Option[Long] = None, pc : Option[Long] = None) = {
+ if (!iter.hasNext ) iter = PartitionedNetworkClientSpec.this.nodes.iterator
+ Some(iter.next)
+ }
+ def nodesForOneReplica(id: Int, c: Option[Long] = None, pc: Option[Long] = None) = null
+ def nodesForPartitionedId(id:Int, c: Option[Long] = None, pc: Option[Long] = None) = null
+ def nodesForPartitions(id: Int, partitions: Set[Int], c: Option[Long] = None, pc: Option[Long] = None) = null
+ def nextNodes(id: Int, capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[Node] = {
+ val result : util.LinkedHashSet[Node] = new util.LinkedHashSet()
+ result.addAll(JavaConversions.asJavaCollection( PartitionedNetworkClientSpec.this.nodes))
+ result
+ }
+ }
+ val loadBalancerFactory = mock[PartitionedLoadBalancerFactory[Int]]
+ val clusterIoClient = new ClusterIoClient {
+ var invocationMap = Map(1 -> 0, 2 -> 0, 3 -> 0)
+ def sendMessage[RequestMsg, ResponseMsg](node: Node, requestCtx: Request[RequestMsg, ResponseMsg]) {
+ val oldVal = invocationMap(node.id)
+ invocationMap = invocationMap + (node.id -> (oldVal+1))
+ if (node.id == 1 || node.id == 2) {
+ requestCtx.onFailure(new ConnectException with RequestAccess[Request[RequestMsg, ResponseMsg]] {
+ def request = requestCtx
+ })
+ } else {
+ requestCtx.onSuccess(requestCtx.outputSerializer.requestToBytes(requestCtx.message))
+ }
+ }
+ def nodesChanged(nodes: Set[Node]) = {PartitionedNetworkClientSpec.this.endpoints}
+ def shutdown {}
+ }
+ val clusterClient = PartitionedNetworkClientSpec.this.clusterClient
+ }
+ nc2.clusterClient.nodes returns nodeSet
+ nc2.clusterClient.isConnected returns true
+ nc2.loadBalancerFactory.newLoadBalancer(endpoints) returns nc2.lb
+ nc2.start
+
+ // check pre-test assumptions
+ nc2.clusterIoClient.invocationMap(1) must be_==(0)
+ nc2.clusterIoClient.invocationMap(2) must be_==(0)
+ nc2.clusterIoClient.invocationMap(3) must be_==(0)
+
+ val future = nc2.sendRequest[Ping, Ping](0, request)
+ future.get must throwA[Exception]
+
+ // check post-test values
+ nc2.clusterIoClient.invocationMap(1) must be_==(1)
+ nc2.clusterIoClient.invocationMap(2) must be_==(1)
+ nc2.clusterIoClient.invocationMap(3) must be_==(0)
+
+ }
+
+ "fail-over must propagate to multiple nodes, if fail over nodes fail as well" in {
+ val nc2 = new PartitionedNetworkClientFailOver[Int] with ClusterClientComponent with ClusterIoClientComponent with PartitionedLoadBalancerFactoryComponent[Int] {
+ override val failOverAttempts = 1000;
+ val lb = new PartitionedLoadBalancer[Int] {
+ var iter = PartitionedNetworkClientSpec.this.nodes.iterator
+ def nextNode(id: Int, c: Option[Long] = None, pc : Option[Long] = None) = {
+ if (!iter.hasNext ) iter = PartitionedNetworkClientSpec.this.nodes.iterator
+ Some(iter.next)
+ }
+ def nodesForOneReplica(id: Int, c: Option[Long] = None, pc: Option[Long] = None) = null
+ def nodesForPartitionedId(id:Int, c: Option[Long] = None, pc: Option[Long] = None) = null
+ def nodesForPartitions(id: Int, partitions: Set[Int], c: Option[Long] = None, pc: Option[Long] = None) = null
+ def nextNodes(id: Int, capability: Option[Long], persistentCapability: Option[Long]): util.LinkedHashSet[Node] = {
+ val result : util.LinkedHashSet[Node] = new util.LinkedHashSet()
+ result.addAll(JavaConversions.asJavaCollection( PartitionedNetworkClientSpec.this.nodes))
+ result
+ }
+ }
+ val loadBalancerFactory = mock[PartitionedLoadBalancerFactory[Int]]
+ val clusterIoClient = new ClusterIoClient {
+ var invocationMap = Map(1 -> 0, 2 -> 0, 3 -> 0)
+ def sendMessage[RequestMsg, ResponseMsg](node: Node, requestCtx: Request[RequestMsg, ResponseMsg]) {
+ val oldVal = invocationMap(node.id)
+ invocationMap = invocationMap + (node.id -> (oldVal+1))
+ if (node.id == 1 || node.id == 2) {
+ requestCtx.onFailure(new ConnectException with RequestAccess[Request[RequestMsg, ResponseMsg]] {
+ def request = requestCtx
+ })
+ } else {
+ requestCtx.onSuccess(requestCtx.outputSerializer.requestToBytes(requestCtx.message))
+ }
+ }
+ def nodesChanged(nodes: Set[Node]) = {PartitionedNetworkClientSpec.this.endpoints}
+ def shutdown {}
+ }
+ val clusterClient = PartitionedNetworkClientSpec.this.clusterClient
+ }
+ nc2.clusterClient.nodes returns nodeSet
+ nc2.clusterClient.isConnected returns true
+ nc2.loadBalancerFactory.newLoadBalancer(endpoints) returns nc2.lb
+ nc2.start
+
+ // check pre-test assumptions
+ nc2.clusterIoClient.invocationMap(1) must be_==(0)
+ nc2.clusterIoClient.invocationMap(2) must be_==(0)
+ nc2.clusterIoClient.invocationMap(3) must be_==(0)
+
+ nc2.sendRequest[Ping, Ping](0, request)
+
+ // check post-test values
+ nc2.clusterIoClient.invocationMap(1) must be_==(1)
+ nc2.clusterIoClient.invocationMap(2) must be_==(1)
+ nc2.clusterIoClient.invocationMap(3) must be_==(1)
+ }
+ }
def messageCustomizer(node: Node, ids: Set[Int]): Ping = new Ping
}
\ No newline at end of file
diff --git a/network/src/test/scala/com/linkedin/norbert/network/partitioned/loadbalancer/ConsistentHashPartitionedLoadBalancerFactorySpec.scala b/network/src/test/scala/com/linkedin/norbert/network/partitioned/loadbalancer/ConsistentHashPartitionedLoadBalancerFactorySpec.scala
index c084e65d..27ecb63c 100644
--- a/network/src/test/scala/com/linkedin/norbert/network/partitioned/loadbalancer/ConsistentHashPartitionedLoadBalancerFactorySpec.scala
+++ b/network/src/test/scala/com/linkedin/norbert/network/partitioned/loadbalancer/ConsistentHashPartitionedLoadBalancerFactorySpec.scala
@@ -185,15 +185,15 @@ class ConsistentHashPartitionedLoadBalancerFactorySpec extends SpecificationWith
val lb = loadBalancerFactory.newLoadBalancer(toEndpoints(nodes))
val accessVector = Array(0,0,0,0,0,0)
(0 to 11).foreach { (i) =>
- val node1 : Node = lb.nextNode(EId(1210), Some(0x1L), Some(2L)).get
+ val node1 : Option[Node] = lb.nextNode(EId(1210), Some(0x1L), Some(2L))
if (!node1.eq(None))
- accessVector(node1.id) = accessVector(node1.id) + 1
+ accessVector(node1.get.id) = accessVector(node1.get.id) + 1
}
(0 to 11).foreach { (i) =>
- val node2 : Node = lb.nextNode(EId(1210), Some(0x2L), Some(2L)).get
+ val node2 : Option[Node] = lb.nextNode(EId(1210), Some(0x2L), Some(2L))
if (!node2.eq(None))
- accessVector(node2.id) = accessVector(node2.id) + 1
+ accessVector(node2.get.id) = accessVector(node2.get.id) + 1
}
accessVector(0) must be_==(accessVector(3))
diff --git a/network/src/test/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedConsistentHashedLoadBalancerSpec.scala b/network/src/test/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedConsistentHashedLoadBalancerSpec.scala
index df1e6495..996867ec 100644
--- a/network/src/test/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedConsistentHashedLoadBalancerSpec.scala
+++ b/network/src/test/scala/com/linkedin/norbert/network/partitioned/loadbalancer/PartitionedConsistentHashedLoadBalancerSpec.scala
@@ -4,6 +4,9 @@ import org.specs.SpecificationWithJUnit
import com.linkedin.norbert.network.common.Endpoint
import com.linkedin.norbert.cluster.{InvalidClusterException, Node}
+import scala.collection.JavaConversions
+import scala.collection.immutable.HashSet
+
/*
* Copyright 2009-2010 LinkedIn, Inc
*
@@ -21,13 +24,16 @@ import com.linkedin.norbert.cluster.{InvalidClusterException, Node}
*/
class PartitionedConsistentHashedLoadBalancerSpec extends SpecificationWithJUnit {
- class TestLBF(numPartitions: Int, csr: Boolean = true)
+ class TestLBF(numPartitions: Int, hashFunction:((Int) => Int), numReplicas:Int = 10, csr: Boolean = true)
extends PartitionedConsistentHashedLoadBalancerFactory[Int](numPartitions,
- 10,
- (id: Int) => HashFunctions.fnv(BigInt(id).toByteArray),
+ numReplicas,
+ hashFunction,
(str: String) => str.hashCode(),
- csr)
-
+ csr) {
+ def this(numPartitions: Int, numReplicas:Int = 10, csr: Boolean = true) =
+ this(numPartitions, (id: Int) => HashFunctions.fnv(BigInt(id).toByteArray), numReplicas, csr)
+ }
+
class TestEndpoint(val node: Node, var csr: Boolean) extends Endpoint {
def canServeRequests = csr
@@ -53,7 +59,15 @@ class PartitionedConsistentHashedLoadBalancerSpec extends SpecificationWithJUnit
// loadBalancerFactory.partitionForId(EId(1210)) must be_==(0)
// }
// }
-
+
+ val overlappingAtPartitionZero = Set(
+ Node(0, "localhost:31311", true, Set(0,1)),
+ Node(1, "localhost:31312", true, Set(2)),
+ Node(2, "localhost:31313", true, Set(0,3)),
+ Node(3, "localhost:31314", true, Set(0,4)),
+ Node(4, "localhost:31315", true, Set(4))
+ )
+
val sampleNodes = Set(
Node(0, "localhost:31313", true, Set(0, 1), Some(0x1), Some(0)),
Node(1, "localhost:31313", true, Set(1, 2)),
@@ -79,7 +93,7 @@ class PartitionedConsistentHashedLoadBalancerSpec extends SpecificationWithJUnit
Node(0, "localhost:31313", true, Set[Int]()),
Node(1, "localhost:31313", true, Set[Int]()))
- new TestLBF(2, false).newLoadBalancer(toEndpoints(nodes)) must throwA[InvalidClusterException]
+ new TestLBF(2,10, false).newLoadBalancer(toEndpoints(nodes)) must throwA[InvalidClusterException]
}
"throw InvalidClusterException if one partition is unavailable, and the LBF cannot serve requests in that state, " in {
@@ -87,8 +101,8 @@ class PartitionedConsistentHashedLoadBalancerSpec extends SpecificationWithJUnit
Node(0, "localhost:31313", true, Set(1)),
Node(1, "localhost:31313", true, Set[Int]()))
- new TestLBF(2, true).newLoadBalancer(toEndpoints(nodes)) must not (throwA[InvalidClusterException])
- new TestLBF(2, false).newLoadBalancer(toEndpoints(nodes)) must throwA[InvalidClusterException]
+ new TestLBF(2,10, true).newLoadBalancer(toEndpoints(nodes)) must not (throwA[InvalidClusterException])
+ new TestLBF(2,10, false).newLoadBalancer(toEndpoints(nodes)) must throwA[InvalidClusterException]
}
"successfully calculate broadcast nodes" in {
@@ -144,10 +158,20 @@ class PartitionedConsistentHashedLoadBalancerSpec extends SpecificationWithJUnit
// Mark node 4 down
markUnavailable(endpoints, 4)
- val lbf = new TestLBF(5, false)
+ val lbf = new TestLBF(5, 10, false)
var loadBalancer = lbf.newLoadBalancer(endpoints)
loadBalancer.nodesForOneReplica(0, Some(0), Some(0)) must throwA[InvalidClusterException]
}
+
+ "return a complete set of nodes within partition 0" in {
+ val nodes = overlappingAtPartitionZero
+ val endpoints = toEndpoints(nodes)
+ val lbf = new TestLBF(5, (i:Int)=>i, 10, true)
+ val loadBalancer = lbf.newLoadBalancer(endpoints)
+ val lbNodes = JavaConversions.asScalaSet(loadBalancer.nextNodes(0, None, None))
+ val lbNodeIds = lbNodes.map((n:Node) => n.id)
+ lbNodeIds must be_==(Set(0, 2, 3))
+ }
}
}
\ No newline at end of file
diff --git a/project/Build.scala b/project/Build.scala
index 453db593..c01fa7d9 100644
--- a/project/Build.scala
+++ b/project/Build.scala
@@ -16,7 +16,7 @@ object BuildSettings {
val buildSettings = Defaults.defaultSettings ++ Seq (
organization := "com.linkedin",
version := "0.6.65",
- scalaVersion := "2.8.1",
+ scalaVersion := "2.10.4",
credentialsSetting,
publishArtifact in (Compile, packageDoc) := false,
publishTo <<= (version) { version: String =>
@@ -35,19 +35,21 @@ object Resolvers {
object ClusterDependencies {
val ZOOKEEPER_VER = "3.3.0"
- val PROTOBUF_VER = "2.4.0a"
+ val PROTOBUF_VER = "2.6.0"
val LOG4J_VER = "1.2.16"
- val SPECS_VER = "1.6.7"
+ val SPECS_VER = "1.6.9"
val MOCKITO_VER = "1.8.4"
val CGLIB_VER = "2.1_3"
val OBJENESIS = "1.0"
val JUNIT_VER = "4.8.1"
-
+ val AKKA_ACTOR_VER = "2.4"
val zookeeper = "org.apache.zookeeper" % "zookeeper" % ZOOKEEPER_VER
val protobuf = "com.google.protobuf" % "protobuf-java" % PROTOBUF_VER
+ val akkaActor = "com.typesafe.akka" % "akka-actor" % AKKA_ACTOR_VER
+
val log4j = "log4j" % "log4j" % LOG4J_VER
val specs = "org.scala-tools.testing" %% "specs" % SPECS_VER % "test"
@@ -60,7 +62,7 @@ object ClusterDependencies {
val junit = "junit" % "junit" % JUNIT_VER % "test"
- val deps = Seq(zookeeper, protobuf, log4j, specs, mockito, cglib, objenesis, junit)
+ val deps = Seq(zookeeper, protobuf, log4j, specs, mockito, cglib, objenesis, junit, akkaActor)
}
object NetworkDependencies {