From d9c94c6594d3893460c089258ebee7d576c004db Mon Sep 17 00:00:00 2001 From: Adam Retter Date: Thu, 14 Dec 2023 09:45:09 +0100 Subject: [PATCH 01/15] We should not expose the Native Object Handle to the public --- java/src/main/java/org/rocksdb/ColumnFamilyOptions.java | 2 +- java/src/main/java/org/rocksdb/Options.java | 2 +- java/src/main/java/org/rocksdb/RocksDB.java | 2 +- java/src/main/java/org/rocksdb/RocksObject.java | 3 --- 4 files changed, 3 insertions(+), 6 deletions(-) diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java b/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java index d25f8c73bc7b..aa7fe8f944ee 100644 --- a/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java +++ b/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java @@ -147,7 +147,7 @@ public ColumnFamilyOptions optimizeForSmallDb() { @Override public ColumnFamilyOptions optimizeForSmallDb(final Cache cache) { - optimizeForSmallDb(nativeHandle_, cache.getNativeHandle()); + optimizeForSmallDb(nativeHandle_, cache.nativeHandle_); return this; } diff --git a/java/src/main/java/org/rocksdb/Options.java b/java/src/main/java/org/rocksdb/Options.java index 675837df7a09..af30279dbb81 100644 --- a/java/src/main/java/org/rocksdb/Options.java +++ b/java/src/main/java/org/rocksdb/Options.java @@ -169,7 +169,7 @@ public Options optimizeForSmallDb() { @Override public Options optimizeForSmallDb(final Cache cache) { - optimizeForSmallDb(nativeHandle_, cache.getNativeHandle()); + optimizeForSmallDb(nativeHandle_, cache.nativeHandle_); return this; } diff --git a/java/src/main/java/org/rocksdb/RocksDB.java b/java/src/main/java/org/rocksdb/RocksDB.java index fe2f38af64f9..865fb2bb097d 100644 --- a/java/src/main/java/org/rocksdb/RocksDB.java +++ b/java/src/main/java/org/rocksdb/RocksDB.java @@ -830,7 +830,7 @@ public ColumnFamilyHandle createColumnFamilyWithImport( final int metadataNum = metadatas.size(); final long[] metadataHandleList = new long[metadataNum]; for (int i = 0; i < 
metadataNum; i++) { - metadataHandleList[i] = metadatas.get(i).getNativeHandle(); + metadataHandleList[i] = metadatas.get(i).nativeHandle_; } final ColumnFamilyHandle columnFamilyHandle = new ColumnFamilyHandle(this, createColumnFamilyWithImport(nativeHandle_, columnFamilyDescriptor.getName(), diff --git a/java/src/main/java/org/rocksdb/RocksObject.java b/java/src/main/java/org/rocksdb/RocksObject.java index f07e1018afd6..7f4d63755d17 100644 --- a/java/src/main/java/org/rocksdb/RocksObject.java +++ b/java/src/main/java/org/rocksdb/RocksObject.java @@ -39,7 +39,4 @@ protected void disposeInternal() { protected abstract void disposeInternal(final long handle); - public long getNativeHandle() { - return nativeHandle_; - } } From 16702c35283d4644f7db8edcde7b5b087d78b4d2 Mon Sep 17 00:00:00 2001 From: Adam Retter Date: Thu, 14 Dec 2023 09:45:23 +0100 Subject: [PATCH 02/15] Improve Javadoc in RocksCallbackObject --- java/src/main/java/org/rocksdb/RocksCallbackObject.java | 9 ++++++++- java/src/main/java/org/rocksdb/RocksObject.java | 3 +++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/java/src/main/java/org/rocksdb/RocksCallbackObject.java b/java/src/main/java/org/rocksdb/RocksCallbackObject.java index 8a7c3713e9be..8fd11d3e5b0c 100644 --- a/java/src/main/java/org/rocksdb/RocksCallbackObject.java +++ b/java/src/main/java/org/rocksdb/RocksCallbackObject.java @@ -13,7 +13,9 @@ * which are called from C++ via JNI. *

* RocksCallbackObject is the base-class any RocksDB classes that acts as a - * callback from some underlying underlying native C++ {@code rocksdb} object. + * callback from some underlying native C++ {@code rocksdb} object. + * Its implementation is always coupled with + * a C++ implementation of {@code ROCKSDB_NAMESPACE::JniCallback}. *

* The use of {@code RocksObject} should always be preferred over * {@link RocksCallbackObject} if callbacks are not required. @@ -21,6 +23,11 @@ public abstract class RocksCallbackObject extends AbstractImmutableNativeReference { + /** + * An immutable reference to the value of the C++ pointer pointing to some + * underlying native RocksDB C++ object that + * extends {@code ROCKSDB_NAMESPACE::JniCallback}. + */ protected final long nativeHandle_; protected RocksCallbackObject(final long... nativeParameterHandles) { diff --git a/java/src/main/java/org/rocksdb/RocksObject.java b/java/src/main/java/org/rocksdb/RocksObject.java index 7f4d63755d17..cd5de70acbe2 100644 --- a/java/src/main/java/org/rocksdb/RocksObject.java +++ b/java/src/main/java/org/rocksdb/RocksObject.java @@ -39,4 +39,7 @@ protected void disposeInternal() { protected abstract void disposeInternal(final long handle); +// long getNativeHandle() { +// return nativeHandle_; +// } } From 7ec5c901db4b54b18a7780dd7483e5d871965ac6 Mon Sep 17 00:00:00 2001 From: Adam Retter Date: Thu, 14 Dec 2023 18:09:18 +0100 Subject: [PATCH 03/15] Constructor should be package-private --- java/src/main/java/org/rocksdb/KeyMayExist.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/java/src/main/java/org/rocksdb/KeyMayExist.java b/java/src/main/java/org/rocksdb/KeyMayExist.java index 6149b85292aa..cd2267528d85 100644 --- a/java/src/main/java/org/rocksdb/KeyMayExist.java +++ b/java/src/main/java/org/rocksdb/KeyMayExist.java @@ -25,7 +25,7 @@ public int hashCode() { public enum KeyMayExistEnum { kNotExist, kExistsWithoutValue, kExistsWithValue } - public KeyMayExist(final KeyMayExistEnum exists, final int valueLength) { + KeyMayExist(final KeyMayExistEnum exists, final int valueLength) { this.exists = exists; this.valueLength = valueLength; } From 32c0fafac23575e1b27df8bb69ee8dbcdca38e20 Mon Sep 17 00:00:00 2001 From: Adam Retter Date: Thu, 14 Dec 2023 20:22:58 +0100 Subject: [PATCH 04/15] Standardise 
naming --- .../src/main/java/org/rocksdb/SstFileManager.java | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/java/src/main/java/org/rocksdb/SstFileManager.java b/java/src/main/java/org/rocksdb/SstFileManager.java index efce94db24fe..465cf1cfb731 100644 --- a/java/src/main/java/org/rocksdb/SstFileManager.java +++ b/java/src/main/java/org/rocksdb/SstFileManager.java @@ -17,11 +17,10 @@ */ //@ThreadSafe public final class SstFileManager extends RocksObject { - - public static final long RATE_BYTES_PER_SEC_DEFAULT = 0; - public static final boolean DELETE_EXISTING_TRASH_DEFAULT = true; - public static final double MAX_TRASH_DB_RATION_DEFAULT = 0.25; - public static final long BYTES_MAX_DELETE_CHUNK_DEFAULT = 64 * 1024 * 1024; + public static final long DEFAULT_RATE_BYTES_PER_SEC = 0; + public static final boolean DEFAULT_DELETE_EXISTING_TRASH = true; + public static final double DEFAULT_MAX_TRASH_DB_RATION = 0.25; + public static final long DEFAULT_BYTES_MAX_DELETE_CHUNK = 64 * 1024 * 1024; /** * Create a new SstFileManager that can be shared among multiple RocksDB @@ -46,7 +45,7 @@ public SstFileManager(final Env env) throws RocksDBException { */ public SstFileManager(final Env env, /*@Nullable*/ final Logger logger) throws RocksDBException { - this(env, logger, RATE_BYTES_PER_SEC_DEFAULT); + this(env, logger, DEFAULT_RATE_BYTES_PER_SEC); } /** @@ -66,7 +65,7 @@ public SstFileManager(final Env env, /*@Nullable*/ final Logger logger) */ public SstFileManager(final Env env, /*@Nullable*/ final Logger logger, final long rateBytesPerSec) throws RocksDBException { - this(env, logger, rateBytesPerSec, MAX_TRASH_DB_RATION_DEFAULT); + this(env, logger, rateBytesPerSec, DEFAULT_MAX_TRASH_DB_RATION); } /** @@ -91,7 +90,7 @@ public SstFileManager(final Env env, /*@Nullable*/ final Logger logger, final long rateBytesPerSec, final double maxTrashDbRatio) throws RocksDBException { this(env, logger, rateBytesPerSec, maxTrashDbRatio, - 
BYTES_MAX_DELETE_CHUNK_DEFAULT); + DEFAULT_BYTES_MAX_DELETE_CHUNK); } /** From f71c23005cb0a30861cf5ac9458c2b6e58e015f7 Mon Sep 17 00:00:00 2001 From: Adam Retter Date: Thu, 14 Dec 2023 22:30:40 +0100 Subject: [PATCH 05/15] Fix Unary operator issue --- java/src/main/java/org/rocksdb/util/BytewiseComparator.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/java/src/main/java/org/rocksdb/util/BytewiseComparator.java b/java/src/main/java/org/rocksdb/util/BytewiseComparator.java index 202241d3bad9..fd55fdf8c57c 100644 --- a/java/src/main/java/org/rocksdb/util/BytewiseComparator.java +++ b/java/src/main/java/org/rocksdb/util/BytewiseComparator.java @@ -46,7 +46,7 @@ static int _compare(final ByteBuffer a, final ByteBuffer b) { if (a.remaining() < b.remaining()) { r = -1; } else if (a.remaining() > b.remaining()) { - r = +1; + r = 1; } } return r; From d94828758f1eb5346ddcf10488d92b8d50fc3cca Mon Sep 17 00:00:00 2001 From: Adam Retter Date: Thu, 14 Dec 2023 22:56:35 +0100 Subject: [PATCH 06/15] Add missing JavaDoc, and fix JavaDoc warnings --- .../org/rocksdb/AbstractCompactionFilter.java | 17 + .../AbstractCompactionFilterFactory.java | 7 +- .../java/org/rocksdb/AbstractComparator.java | 14 +- .../rocksdb/AbstractComparatorJniBridge.java | 2 +- .../org/rocksdb/AbstractEventListener.java | 92 ++++ .../AbstractImmutableNativeReference.java | 5 + .../org/rocksdb/AbstractMutableOptions.java | 140 +++++ .../org/rocksdb/AbstractRocksIterator.java | 6 + .../main/java/org/rocksdb/AbstractSlice.java | 19 + .../java/org/rocksdb/AbstractTableFilter.java | 3 + .../rocksdb/AbstractTransactionNotifier.java | 12 + .../java/org/rocksdb/AbstractWriteBatch.java | 18 + .../AdvancedColumnFamilyOptionsInterface.java | 4 +- ...edMutableColumnFamilyOptionsInterface.java | 2 + .../org/rocksdb/BackgroundErrorReason.java | 18 + .../main/java/org/rocksdb/BackupEngine.java | 5 + .../org/rocksdb/BlockBasedTableConfig.java | 8 +- .../java/org/rocksdb/BuiltinComparator.java | 19 
+- .../java/org/rocksdb/ByteBufferGetStatus.java | 11 + java/src/main/java/org/rocksdb/Cache.java | 9 + .../rocksdb/CassandraCompactionFilter.java | 18 +- .../rocksdb/CassandraValueMergeOperator.java | 14 + .../src/main/java/org/rocksdb/Checkpoint.java | 16 + .../main/java/org/rocksdb/ChecksumType.java | 10 +- .../src/main/java/org/rocksdb/ClockCache.java | 4 + .../java/org/rocksdb/ColumnFamilyHandle.java | 5 + .../rocksdb/ColumnFamilyOptionsInterface.java | 5 + .../java/org/rocksdb/CompactRangeOptions.java | 67 ++- .../java/org/rocksdb/CompactionJobInfo.java | 6 + .../java/org/rocksdb/CompactionJobStats.java | 18 +- .../java/org/rocksdb/CompactionOptions.java | 17 +- .../org/rocksdb/CompactionOptionsFIFO.java | 3 + .../rocksdb/CompactionOptionsUniversal.java | 3 + .../java/org/rocksdb/CompactionReason.java | 7 + .../java/org/rocksdb/CompactionStyle.java | 15 + .../java/org/rocksdb/ComparatorOptions.java | 3 + .../java/org/rocksdb/CompressionOptions.java | 60 +++ .../java/org/rocksdb/CompressionType.java | 35 ++ .../org/rocksdb/ConcurrentTaskLimiter.java | 8 + .../rocksdb/ConcurrentTaskLimiterImpl.java | 10 + .../main/java/org/rocksdb/ConfigOptions.java | 41 +- .../java/org/rocksdb/DBOptionsInterface.java | 178 +++---- java/src/main/java/org/rocksdb/DbPath.java | 6 + .../main/java/org/rocksdb/DirectSlice.java | 9 + .../main/java/org/rocksdb/Experimental.java | 5 + .../rocksdb/ExternalFileIngestionInfo.java | 3 + java/src/main/java/org/rocksdb/Filter.java | 7 +- .../java/org/rocksdb/FilterPolicyType.java | 5 +- .../main/java/org/rocksdb/FlushJobInfo.java | 3 + .../main/java/org/rocksdb/FlushReason.java | 62 +++ java/src/main/java/org/rocksdb/GetStatus.java | 8 + .../rocksdb/HashLinkedListMemTableConfig.java | 26 +- .../rocksdb/HashSkipListMemTableConfig.java | 14 +- .../main/java/org/rocksdb/HistogramData.java | 70 +++ .../main/java/org/rocksdb/HistogramType.java | 176 ++++++- java/src/main/java/org/rocksdb/Holder.java | 2 + 
.../rocksdb/ImportColumnFamilyOptions.java | 4 + .../java/org/rocksdb/IndexShorteningMode.java | 4 +- .../main/java/org/rocksdb/InfoLogLevel.java | 27 + .../rocksdb/IngestExternalFileOptions.java | 5 + .../main/java/org/rocksdb/KeyMayExist.java | 37 +- .../java/org/rocksdb/LiveFileMetaData.java | 2 +- java/src/main/java/org/rocksdb/LogFile.java | 3 + java/src/main/java/org/rocksdb/Logger.java | 39 +- .../main/java/org/rocksdb/MemTableInfo.java | 3 + .../main/java/org/rocksdb/MergeOperator.java | 6 + .../rocksdb/MutableColumnFamilyOptions.java | 187 +++++++ .../MutableColumnFamilyOptionsInterface.java | 5 + .../java/org/rocksdb/MutableDBOptions.java | 87 ++++ .../rocksdb/MutableDBOptionsInterface.java | 5 + .../java/org/rocksdb/MutableOptionKey.java | 47 +- .../java/org/rocksdb/MutableOptionValue.java | 5 + .../main/java/org/rocksdb/OperationStage.java | 43 ++ .../main/java/org/rocksdb/OperationType.java | 15 + .../org/rocksdb/OptimisticTransactionDB.java | 4 +- .../rocksdb/OptimisticTransactionOptions.java | 6 + .../main/java/org/rocksdb/OptionString.java | 38 ++ .../main/java/org/rocksdb/OptionsUtil.java | 3 + .../main/java/org/rocksdb/PerfContext.java | 177 ++++++- java/src/main/java/org/rocksdb/PerfLevel.java | 20 +- .../java/org/rocksdb/PersistentCache.java | 11 + .../java/org/rocksdb/PlainTableConfig.java | 35 ++ .../org/rocksdb/PrepopulateBlobCache.java | 8 + java/src/main/java/org/rocksdb/Priority.java | 16 + java/src/main/java/org/rocksdb/Range.java | 7 + .../main/java/org/rocksdb/RateLimiter.java | 16 + .../java/org/rocksdb/RateLimiterMode.java | 12 + .../main/java/org/rocksdb/ReadOptions.java | 6 + java/src/main/java/org/rocksdb/ReadTier.java | 16 + .../RemoveEmptyValueCompactionFilter.java | 6 +- .../java/org/rocksdb/RocksCallbackObject.java | 5 + java/src/main/java/org/rocksdb/RocksDB.java | 202 +++++-- .../java/org/rocksdb/RocksDBException.java | 26 +- .../main/java/org/rocksdb/RocksIterator.java | 11 +- .../java/org/rocksdb/RocksMutableObject.java | 
18 + .../main/java/org/rocksdb/RocksObject.java | 17 +- .../main/java/org/rocksdb/SanityLevel.java | 15 + .../org/rocksdb/SizeApproximationFlag.java | 12 + .../org/rocksdb/SkipListMemTableConfig.java | 3 + .../main/java/org/rocksdb/SstFileManager.java | 20 +- .../main/java/org/rocksdb/SstFileReader.java | 9 + .../org/rocksdb/SstFileReaderIterator.java | 7 + .../org/rocksdb/SstPartitionerFactory.java | 5 + .../SstPartitionerFixedPrefixFactory.java | 6 + java/src/main/java/org/rocksdb/StateType.java | 8 + .../src/main/java/org/rocksdb/Statistics.java | 19 + .../java/org/rocksdb/StatisticsCollector.java | 3 + .../java/org/rocksdb/StatsCollectorInput.java | 12 +- java/src/main/java/org/rocksdb/Status.java | 164 +++++- .../org/rocksdb/StringAppendOperator.java | 13 + .../rocksdb/TableFileCreationBriefInfo.java | 3 + .../org/rocksdb/TableFileCreationInfo.java | 3 + .../org/rocksdb/TableFileCreationReason.java | 19 + .../org/rocksdb/TableFileDeletionInfo.java | 3 + .../main/java/org/rocksdb/ThreadStatus.java | 10 + .../src/main/java/org/rocksdb/TickerType.java | 492 ++++++++++++++---- .../main/java/org/rocksdb/TraceOptions.java | 8 + .../main/java/org/rocksdb/Transaction.java | 120 +++-- .../main/java/org/rocksdb/TransactionDB.java | 50 ++ .../org/rocksdb/TransactionDBOptions.java | 19 +- .../java/org/rocksdb/TransactionOptions.java | 10 +- .../java/org/rocksdb/UInt64AddOperator.java | 3 + .../org/rocksdb/VectorMemTableConfig.java | 4 + .../java/org/rocksdb/WBWIRocksIterator.java | 59 ++- .../main/java/org/rocksdb/WalFileType.java | 3 + java/src/main/java/org/rocksdb/WalFilter.java | 7 + .../java/org/rocksdb/WalProcessingOption.java | 14 +- .../src/main/java/org/rocksdb/WriteBatch.java | 164 +++++- .../java/org/rocksdb/WriteBufferManager.java | 13 +- .../java/org/rocksdb/WriteStallCondition.java | 15 + .../main/java/org/rocksdb/WriteStallInfo.java | 3 + .../java/org/rocksdb/util/BufferUtil.java | 13 + .../main/java/org/rocksdb/util/ByteUtil.java | 5 +- 
.../org/rocksdb/util/BytewiseComparator.java | 5 + .../java/org/rocksdb/util/Environment.java | 127 ++++- .../java/org/rocksdb/util/IntComparator.java | 13 +- .../util/ReverseBytewiseComparator.java | 5 + .../main/java/org/rocksdb/util/SizeUnit.java | 35 +- .../org/rocksdb/CompactRangeOptionsTest.java | 18 +- .../java/org/rocksdb/SstFileManagerTest.java | 4 +- .../org/rocksdb/util/ByteBufferAllocator.java | 11 + .../util/CapturingWriteBatchHandler.java | 18 + 142 files changed, 3704 insertions(+), 402 deletions(-) diff --git a/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java b/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java index fd7eef4d4cfb..1f4a5e4a687c 100644 --- a/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java +++ b/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java @@ -10,14 +10,25 @@ *

* At present, we just permit an overriding Java class to wrap a C++ * implementation + * + * @param the concrete type of the {@link AbstractSlice} that the Compaction Filter uses. */ public abstract class AbstractCompactionFilter> extends RocksObject { + /** + * Context of the Compaction Filter. + */ public static class Context { private final boolean fullCompaction; private final boolean manualCompaction; + /** + * Context constructor. + * + * @param fullCompaction true to filter full compaction, false otherwise. + * @param manualCompaction true to filter manual compaction, false otherwise. + */ public Context(final boolean fullCompaction, final boolean manualCompaction) { this.fullCompaction = fullCompaction; this.manualCompaction = manualCompaction; @@ -43,6 +54,12 @@ public boolean isManualCompaction() { } } + /** + * Constructor to be called by subclasses to set the + * handle to the underlying C++ object. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ Compaction Filter. + */ protected AbstractCompactionFilter(final long nativeHandle) { super(nativeHandle); } diff --git a/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java b/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java index c10fb8a2a9bb..0cf7814101c1 100644 --- a/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java +++ b/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java @@ -14,6 +14,9 @@ public abstract class AbstractCompactionFilterFactory> extends RocksCallbackObject { + /** + * Constructs a new Compaction Filter Factory which has no underlying C++ object. + */ public AbstractCompactionFilterFactory() { super(0L); } @@ -26,8 +29,8 @@ protected long initializeNative(final long... 
nativeParameterHandles) { /** * Called from JNI, see compaction_filter_factory_jnicallback.cc * - * @param fullCompaction {@link AbstractCompactionFilter.Context#fullCompaction} - * @param manualCompaction {@link AbstractCompactionFilter.Context#manualCompaction} + * @param fullCompaction {@link AbstractCompactionFilter.Context#isFullCompaction()} + * @param manualCompaction {@link AbstractCompactionFilter.Context#isManualCompaction()} * * @return native handle of the CompactionFilter */ diff --git a/java/src/main/java/org/rocksdb/AbstractComparator.java b/java/src/main/java/org/rocksdb/AbstractComparator.java index 5cb33c812d1f..2c6ad2df4cf6 100644 --- a/java/src/main/java/org/rocksdb/AbstractComparator.java +++ b/java/src/main/java/org/rocksdb/AbstractComparator.java @@ -10,7 +10,7 @@ /** * Comparators are used by RocksDB to determine * the ordering of keys. - * + *

* Implementations of Comparators in Java should extend this class. */ public abstract class AbstractComparator @@ -20,6 +20,11 @@ public abstract class AbstractComparator super(); } + /** + * Construct an AbstractComparator. + * + * @param comparatorOptions options for the comparator. + */ protected AbstractComparator(final ComparatorOptions comparatorOptions) { super(comparatorOptions.nativeHandle_); } @@ -59,7 +64,7 @@ ComparatorType getComparatorType() { * Three-way key comparison. Implementations should provide a * total order * on keys that might be passed to it. - * + *

* The implementation may modify the {@code ByteBuffer}s passed in, though * it would be unconventional to modify the "limit" or any of the * underlying bytes. As a callback, RocksJava will ensure that {@code a} @@ -114,6 +119,11 @@ public void findShortSuccessor(final ByteBuffer key) { // no-op } + /** + * Returns true if we are using direct byte buffers. + * + * @return true if we are using direct byte buffers, false otherwise. + */ public final boolean usingDirectBuffers() { return usingDirectBuffers(nativeHandle_); } diff --git a/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java b/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java index d0ceef93d419..9bd1ff7694bc 100644 --- a/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java +++ b/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java @@ -12,7 +12,7 @@ * it holds methods which are called * from C++ to interact with a Comparator * written in Java. - * + *

* Placing these bridge methods in this * class keeps the API of the * {@link org.rocksdb.AbstractComparator} clean. diff --git a/java/src/main/java/org/rocksdb/AbstractEventListener.java b/java/src/main/java/org/rocksdb/AbstractEventListener.java index c9371c45eb0c..5c7f58ab6afb 100644 --- a/java/src/main/java/org/rocksdb/AbstractEventListener.java +++ b/java/src/main/java/org/rocksdb/AbstractEventListener.java @@ -12,28 +12,120 @@ */ @SuppressWarnings("PMD.AvoidDuplicateLiterals") public abstract class AbstractEventListener extends RocksCallbackObject implements EventListener { + + /** + * Callback events that can be enabled. + */ public enum EnabledEventCallback { + + /** + * Flush completed. + */ ON_FLUSH_COMPLETED((byte) 0x0), + + /** + * Flush beginning. + */ ON_FLUSH_BEGIN((byte) 0x1), + + /** + * Table file was deleted. + */ ON_TABLE_FILE_DELETED((byte) 0x2), + + /** + * Compaction beginning. + */ ON_COMPACTION_BEGIN((byte) 0x3), + + /** + * Compaction completed. + */ ON_COMPACTION_COMPLETED((byte) 0x4), + + /** + * Table file created. + */ ON_TABLE_FILE_CREATED((byte) 0x5), + + /** + * Started creation of Table file. + */ ON_TABLE_FILE_CREATION_STARTED((byte) 0x6), + + /** + * Memtable has been sealed. + */ ON_MEMTABLE_SEALED((byte) 0x7), + + /** + * Started deletion of Column Family handle. + */ ON_COLUMN_FAMILY_HANDLE_DELETION_STARTED((byte) 0x8), + + /** + * External file ingested. + */ ON_EXTERNAL_FILE_INGESTED((byte) 0x9), + + /** + * Background error. + */ ON_BACKGROUND_ERROR((byte) 0xA), + + /** + * Stall conditions have been changed. + */ ON_STALL_CONDITIONS_CHANGED((byte) 0xB), + + /** + * File read has finished. + */ ON_FILE_READ_FINISH((byte) 0xC), + + /** + * File write has finished. + */ ON_FILE_WRITE_FINISH((byte) 0xD), + + /** + * File flush has finished. + */ ON_FILE_FLUSH_FINISH((byte) 0xE), + + /** + * File sync has finished. + */ ON_FILE_SYNC_FINISH((byte) 0xF), + + /** + * Range file read sync finished. 
+ */ ON_FILE_RANGE_SYNC_FINISH((byte) 0x10), + + /** + * File truncation has finished. + */ ON_FILE_TRUNCATE_FINISH((byte) 0x11), + + /** + * Closing a file has finished. + */ ON_FILE_CLOSE_FINISH((byte) 0x12), + + /** + * Flag has been set to be notified on file IO. + */ SHOULD_BE_NOTIFIED_ON_FILE_IO((byte) 0x13), + + /** + * Error recovery beginning. + */ ON_ERROR_RECOVERY_BEGIN((byte) 0x14), + + /** + * Error recovery completed. + */ ON_ERROR_RECOVERY_COMPLETED((byte) 0x15); private final byte value; diff --git a/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java b/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java index 173d63e9011e..8c500d8a5df2 100644 --- a/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java +++ b/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java @@ -22,6 +22,11 @@ public abstract class AbstractImmutableNativeReference */ protected final AtomicBoolean owningHandle_; + /** + * Construct an AbstractImmutableNativeReference. + * + * @param owningHandle true if this Java object owns the underlying C++ object, false otherwise. + */ protected AbstractImmutableNativeReference(final boolean owningHandle) { this.owningHandle_ = new AtomicBoolean(owningHandle); } diff --git a/java/src/main/java/org/rocksdb/AbstractMutableOptions.java b/java/src/main/java/org/rocksdb/AbstractMutableOptions.java index 577e89593eaa..7b3f5b39c2d4 100644 --- a/java/src/main/java/org/rocksdb/AbstractMutableOptions.java +++ b/java/src/main/java/org/rocksdb/AbstractMutableOptions.java @@ -9,12 +9,26 @@ * The constructor is protected, so it will always be used as a base class. */ public class AbstractMutableOptions { + /** + * Separator between Key/Value pairs. + */ protected static final String KEY_VALUE_PAIR_SEPARATOR = ";"; + + /** + * Separator between Key and Value. + */ protected static final char KEY_VALUE_SEPARATOR = '='; + + /** + * Separator between integers in an integer array. 
+ */ static final String INT_ARRAY_INT_SEPARATOR = ":"; private static final String HAS_NOT_BEEN_SET = " has not been set"; + /** + * the keys. + */ protected final String[] keys; private final String[] values; @@ -62,12 +76,24 @@ public String toString() { return buffer.toString(); } + /** + * Builder base class for constructing Mutable Options. + * + * @param the type of the Mutable Options. + * @param the type of the Builder. + * @param the type of the Option Key. + */ public abstract static class AbstractMutableOptionsBuilder< T extends AbstractMutableOptions, U extends AbstractMutableOptionsBuilder, K extends MutableOptionKey> { private final Map> options = new LinkedHashMap<>(); private final List unknown = new ArrayList<>(); + /** + * Return the builder. + * + * @return the builder. + */ protected abstract U self(); /** @@ -87,6 +113,11 @@ public abstract static class AbstractMutableOptionsBuilder< */ protected abstract T build(final String[] keys, final String[] values); + /** + * Construct a subclass instance of {@link AbstractMutableOptions}. + * + * @return an instance of the options. + */ public T build() { final String[] keys = new String[options.size()]; final String[] values = new String[options.size()]; @@ -101,6 +132,14 @@ public T build() { return build(keys, values); } + /** + * Set an option of `Double` type. + * + * @param key the key. + * @param value the value. + * + * @return the builder. + */ protected U setDouble( final K key, final double value) { if (key.getValueType() != MutableOptionKey.ValueType.DOUBLE) { @@ -111,6 +150,13 @@ protected U setDouble( return self(); } + /** + * Get an option of `Double` type. + * + * @param key the key. + * + * @return the value of the option. 
+ */ protected double getDouble(final K key) throws NoSuchElementException, NumberFormatException { final MutableOptionValue value = options.get(key); @@ -120,6 +166,14 @@ protected double getDouble(final K key) return value.asDouble(); } + /** + * Set an option of `Long` type. + * + * @param key the key. + * @param value the value. + * + * @return the builder. + */ protected U setLong( final K key, final long value) { if(key.getValueType() != MutableOptionKey.ValueType.LONG) { @@ -130,6 +184,13 @@ protected U setLong( return self(); } + /** + * Get an option of `Long` type. + * + * @param key the key. + * + * @return the value of the option. + */ protected long getLong(final K key) throws NoSuchElementException, NumberFormatException { final MutableOptionValue value = options.get(key); @@ -139,6 +200,14 @@ protected long getLong(final K key) return value.asLong(); } + /** + * Set an option of `int` type. + * + * @param key the key. + * @param value the value. + * + * @return the builder. + */ protected U setInt( final K key, final int value) { if(key.getValueType() != MutableOptionKey.ValueType.INT) { @@ -149,6 +218,13 @@ protected U setInt( return self(); } + /** + * Get an option of `int` type. + * + * @param key the key. + * + * @return the value of the option. + */ protected int getInt(final K key) throws NoSuchElementException, NumberFormatException { final MutableOptionValue value = options.get(key); @@ -158,6 +234,14 @@ protected int getInt(final K key) return value.asInt(); } + /** + * Set an option of `boolean` type. + * + * @param key the key. + * @param value the value. + * + * @return the builder. + */ protected U setBoolean( final K key, final boolean value) { if(key.getValueType() != MutableOptionKey.ValueType.BOOLEAN) { @@ -168,6 +252,13 @@ protected U setBoolean( return self(); } + /** + * Get an option of `boolean` type. + * + * @param key the key. + * + * @return the value of the option. 
+ */ protected boolean getBoolean(final K key) throws NoSuchElementException, NumberFormatException { final MutableOptionValue value = options.get(key); @@ -177,6 +268,14 @@ protected boolean getBoolean(final K key) return value.asBoolean(); } + /** + * Set an option of `int[]` type. + * + * @param key the key. + * @param value the value. + * + * @return the builder. + */ protected U setIntArray( final K key, final int[] value) { if(key.getValueType() != MutableOptionKey.ValueType.INT_ARRAY) { @@ -187,6 +286,13 @@ protected U setIntArray( return self(); } + /** + * Get an option of `int[]` type. + * + * @param key the key. + * + * @return the value of the option. + */ protected int[] getIntArray(final K key) throws NoSuchElementException, NumberFormatException { final MutableOptionValue value = options.get(key); @@ -196,6 +302,14 @@ protected int[] getIntArray(final K key) return value.asIntArray(); } + /** + * Set an option of `String` type. + * + * @param key the key. + * @param value the value. + * + * @return the builder. + */ protected U setString(final K key, final String value) { if (key.getValueType() != MutableOptionKey.ValueType.STRING) { throw new IllegalArgumentException(key + " does not accept a string value"); @@ -204,6 +318,13 @@ protected U setString(final K key, final String value) { return self(); } + /** + * Get an option of `String` type. + * + * @param key the key. + * + * @return the value. + */ protected String getString(final K key) { final MutableOptionValue value = options.get(key); if (value == null) { @@ -212,6 +333,16 @@ protected String getString(final K key) { return value.asString(); } + /** + * Set an option of `Enum[N]` type. + * + * @param the concrete type of the Enum. + * + * @param key the key. + * @param value the value. + * + * @return the builder. 
+ */ protected > U setEnum( final K key, final N value) { if(key.getValueType() != MutableOptionKey.ValueType.ENUM) { @@ -222,6 +353,15 @@ protected > U setEnum( return self(); } + /** + * Get an option of `Enum[N]` type. + * + * @param the concrete type of the Enum. + * + * @param key the key. + * + * @return the value of the option. + */ @SuppressWarnings("unchecked") protected > N getEnum(final K key) throws NoSuchElementException, NumberFormatException { diff --git a/java/src/main/java/org/rocksdb/AbstractRocksIterator.java b/java/src/main/java/org/rocksdb/AbstractRocksIterator.java index b7af848f0c5d..734b202c8a9c 100644 --- a/java/src/main/java/org/rocksdb/AbstractRocksIterator.java +++ b/java/src/main/java/org/rocksdb/AbstractRocksIterator.java @@ -25,6 +25,12 @@ public abstract class AbstractRocksIterator

extends RocksObject implements RocksIteratorInterface { final P parent_; + /** + * Constructs an AbstractRocksIterator. + * + * @param parent the parent object from which the Rocks Iterator was created. + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ RocksIterator. + */ protected AbstractRocksIterator(final P parent, final long nativeHandle) { super(nativeHandle); diff --git a/java/src/main/java/org/rocksdb/AbstractSlice.java b/java/src/main/java/org/rocksdb/AbstractSlice.java index a73d9c644f17..ad037652ac3f 100644 --- a/java/src/main/java/org/rocksdb/AbstractSlice.java +++ b/java/src/main/java/org/rocksdb/AbstractSlice.java @@ -23,13 +23,23 @@ * the Java @see org.rocksdb.AbstractComparator subclass, it disposes the * C++ BaseComparatorJniCallback subclass, which in turn destroys the * Java @see org.rocksdb.AbstractSlice subclass Objects. + * + * @param the concrete Java type that is wrapped by the subclass of {@link AbstractSlice}. */ public abstract class AbstractSlice extends RocksMutableObject { + /** + * Constructs an AbstractSlice. + */ protected AbstractSlice() { super(); } + /** + * Constructs an AbstractSlice. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ Slice. + */ protected AbstractSlice(final long nativeHandle) { super(nativeHandle); } @@ -174,6 +184,13 @@ public boolean startsWith(final AbstractSlice prefix) { } } + /** + * Constructs a new Slice from a String. + * + * @param str the string. + * + * @return the handle to the native C++ Slice object. + */ protected static native long createNewSliceFromString(final String str); private static native int size0(long handle); private static native boolean empty0(long handle); @@ -186,6 +203,8 @@ public boolean startsWith(final AbstractSlice prefix) { * Note that this function should be called only after all * RocksDB instances referencing the slice are closed. 
* Otherwise, an undefined behavior will occur. + * + * @param handle the value of the C++ pointer to the underlying native C++ object. */ @Override protected final void disposeInternal(final long handle) { diff --git a/java/src/main/java/org/rocksdb/AbstractTableFilter.java b/java/src/main/java/org/rocksdb/AbstractTableFilter.java index c696c3e1352e..0c7f994fbee8 100644 --- a/java/src/main/java/org/rocksdb/AbstractTableFilter.java +++ b/java/src/main/java/org/rocksdb/AbstractTableFilter.java @@ -7,6 +7,9 @@ public abstract class AbstractTableFilter extends RocksCallbackObject implements TableFilter { + /** + * Constructs a new AbstractTableFilter. + */ protected AbstractTableFilter() { super(); } diff --git a/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java b/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java index 40caaa0854cc..5b6ed1b5b508 100644 --- a/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java +++ b/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java @@ -12,6 +12,9 @@ public abstract class AbstractTransactionNotifier extends RocksCallbackObject { + /** + * Constructs an AbstractTransactionNotifier. + */ protected AbstractTransactionNotifier() { super(); } @@ -50,6 +53,15 @@ protected long initializeNative(final long... nativeParameterHandles) { protected void disposeInternal() { disposeInternal(nativeHandle_); } + + /** + * Deletes underlying C++ transaction notifier pointer. + * Note that this function should be called only after all + * RocksDB instances referencing the transaction notifier are closed. + * Otherwise, an undefined behavior will occur. + * + * @param handle the value of the C++ pointer to the underlying native C++ object. 
+ */ protected final void disposeInternal(final long handle) { disposeInternalJni(handle); } diff --git a/java/src/main/java/org/rocksdb/AbstractWriteBatch.java b/java/src/main/java/org/rocksdb/AbstractWriteBatch.java index 41d967f53179..59e253e75b0f 100644 --- a/java/src/main/java/org/rocksdb/AbstractWriteBatch.java +++ b/java/src/main/java/org/rocksdb/AbstractWriteBatch.java @@ -7,9 +7,27 @@ import java.nio.ByteBuffer; +/** + * WriteBatch holds a collection of updates to apply atomically to a DB. + *

+ * The updates are applied in the order in which they are added + * to the WriteBatch. For example, the value of "key" will be "v3" + * after the following batch is written: + *


+ *    batch.put("key", "v1");
+ *    batch.remove("key");
+ *    batch.put("key", "v2");
+ *    batch.put("key", "v3");
+ * 
+ */ public abstract class AbstractWriteBatch extends RocksObject implements WriteBatchInterface { + /** + * Construct an AbstractWriteBatch. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ Write Batch object. + */ protected AbstractWriteBatch(final long nativeHandle) { super(nativeHandle); } diff --git a/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java index 867f5ca959bd..200552634647 100644 --- a/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java @@ -12,6 +12,8 @@ * mutable (i.e. present in {@link AdvancedMutableColumnFamilyOptionsInterface}) *

* Taken from include/rocksdb/advanced_options.h + * + * @param the concrete type of the Column Family Options. */ public interface AdvancedColumnFamilyOptionsInterface< T extends AdvancedColumnFamilyOptionsInterface & ColumnFamilyOptionsInterface> { @@ -375,7 +377,7 @@ T setCompactionOptionsFIFO( * even for key hit because they tell us whether to look in that level or go * to the higher level.

* - *

Default: false

+ *

Default: false

* * @param optimizeFiltersForHits boolean value indicating if this flag is set. * @return the reference to the current options. diff --git a/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java index 44e61c6d743d..44ebf5b21c81 100644 --- a/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java @@ -10,6 +10,8 @@ *

* Taken from include/rocksdb/advanced_options.h * and MutableCFOptions in util/cf_options.h + * + * @param the concrete type of the Column Family Options. */ public interface AdvancedMutableColumnFamilyOptionsInterface< T extends AdvancedMutableColumnFamilyOptionsInterface> { diff --git a/java/src/main/java/org/rocksdb/BackgroundErrorReason.java b/java/src/main/java/org/rocksdb/BackgroundErrorReason.java index eec593d35c54..1c68a7e38dc5 100644 --- a/java/src/main/java/org/rocksdb/BackgroundErrorReason.java +++ b/java/src/main/java/org/rocksdb/BackgroundErrorReason.java @@ -5,10 +5,28 @@ package org.rocksdb; +/** + * Reasons for the background error. + */ public enum BackgroundErrorReason { + /** + * Flush. + */ FLUSH((byte) 0x0), + + /** + * Compaction. + */ COMPACTION((byte) 0x1), + + /** + * Write callback. + */ WRITE_CALLBACK((byte) 0x2), + + /** + * Memtable. + */ MEMTABLE((byte) 0x3); private final byte value; diff --git a/java/src/main/java/org/rocksdb/BackupEngine.java b/java/src/main/java/org/rocksdb/BackupEngine.java index 4ee675ad758e..0b3e567af632 100644 --- a/java/src/main/java/org/rocksdb/BackupEngine.java +++ b/java/src/main/java/org/rocksdb/BackupEngine.java @@ -19,6 +19,11 @@ */ public class BackupEngine extends RocksObject implements AutoCloseable { + /** + * Construct a BackupEngine. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ backup engine object. 
+ */ protected BackupEngine(final long nativeHandle) { super(nativeHandle); } diff --git a/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java b/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java index df21d774484d..072ccc8fc6c3 100644 --- a/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java +++ b/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java @@ -11,6 +11,10 @@ */ // TODO(AR) should be renamed BlockBasedTableOptions public class BlockBasedTableConfig extends TableFormatConfig { + + /** + * Constructs a new BlockBasedTableConfig. + */ @SuppressWarnings("PMD.NullAssignment") public BlockBasedTableConfig() { //TODO(AR) flushBlockPolicyFactory @@ -859,7 +863,7 @@ public IndexShorteningMode indexShortening() { /** * Set the index shortening mode. - * + *

* See {@link IndexShorteningMode}. * * @param indexShortening the index shortening mode. @@ -937,7 +941,7 @@ public BlockBasedTableConfig setCacheNumShardBits( * * @deprecated This option is now deprecated. No matter what value it * is set to, it will behave as - * if {@link #hashIndexAllowCollision()} == true. + * if {@code setHashIndexAllowCollision(true)} */ @Deprecated public boolean hashIndexAllowCollision() { diff --git a/java/src/main/java/org/rocksdb/BuiltinComparator.java b/java/src/main/java/org/rocksdb/BuiltinComparator.java index 2c89bf218d1d..f4806fe57d72 100644 --- a/java/src/main/java/org/rocksdb/BuiltinComparator.java +++ b/java/src/main/java/org/rocksdb/BuiltinComparator.java @@ -6,15 +6,16 @@ package org.rocksdb; /** - * Builtin RocksDB comparators - * - *

    - *
  1. BYTEWISE_COMPARATOR - Sorts all keys in ascending bytewise - * order.
  2. - *
  3. REVERSE_BYTEWISE_COMPARATOR - Sorts all keys in descending bytewise - * order
  4. - *
+ * Builtin RocksDB comparators. */ public enum BuiltinComparator { - BYTEWISE_COMPARATOR, REVERSE_BYTEWISE_COMPARATOR + /** + * Sorts all keys in ascending byte wise. + */ + BYTEWISE_COMPARATOR, + + /** + * Sorts all keys in descending byte wise order. + */ + REVERSE_BYTEWISE_COMPARATOR } diff --git a/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java b/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java index 4ab9e8475ce9..fead6b2c13b7 100644 --- a/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java +++ b/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java @@ -20,8 +20,19 @@ * {@link RocksDB#multiGetByteBuffers(ReadOptions, List, List, List)} */ public class ByteBufferGetStatus { + /** + * Status of the request to fetch into the buffer. + */ public final Status status; + + /** + * Size of the data, which may be bigger than the buffer. + */ public final int requiredSize; + + /** + * Buffer containing as much of the value as fits. + */ public final ByteBuffer value; /** diff --git a/java/src/main/java/org/rocksdb/Cache.java b/java/src/main/java/org/rocksdb/Cache.java index 04bd3fcaa398..fc814c94beed 100644 --- a/java/src/main/java/org/rocksdb/Cache.java +++ b/java/src/main/java/org/rocksdb/Cache.java @@ -6,7 +6,16 @@ package org.rocksdb; +/** + * Base class for Cache implementations. + */ public abstract class Cache extends RocksObject { + + /** + * Construct a Cache. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ cache object. 
+ */ protected Cache(final long nativeHandle) { super(nativeHandle); } diff --git a/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java b/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java index 12854c5102be..b452a54c3b73 100644 --- a/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java +++ b/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java @@ -6,10 +6,26 @@ package org.rocksdb; /** - * Just a Java wrapper around CassandraCompactionFilter implemented in C++ + * Just a Java wrapper around CassandraCompactionFilter implemented in C++. + *

+ * Compaction filter for removing expired Cassandra data with ttl. + * Is also in charge of removing tombstones that have been + * promoted to kValue type after a series of merges in compaction. */ public class CassandraCompactionFilter extends AbstractCompactionFilter { + + /** + * Constructs a new CassandraCompactionFilter. + * + * @param purgeTtlOnExpiration if set to true, expired data will be directly purged, + * otherwise expired data will be converted to tombstones + * first and then be eventually removed after + * {@code gcGracePeriodInSeconds}. Should only be on in + * the case that all the writes have the same ttl setting, + * otherwise it could bring old data back. + * @param gcGracePeriodInSeconds the grace period in seconds for gc. + */ public CassandraCompactionFilter( final boolean purgeTtlOnExpiration, final int gcGracePeriodInSeconds) { super(createNewCassandraCompactionFilter0(purgeTtlOnExpiration, gcGracePeriodInSeconds)); diff --git a/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java index cdb82ee43473..c6e87b9e836d 100644 --- a/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java +++ b/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java @@ -6,14 +6,28 @@ package org.rocksdb; /** + * Just a Java wrapper around CassandraValueMergeOperator implemented in C++. + *

* CassandraValueMergeOperator is a merge operator that merges two cassandra wide column * values. */ public class CassandraValueMergeOperator extends MergeOperator { + + /** + * Constructs a new CassandraValueMergeOperator. + * + * @param gcGracePeriodInSeconds the grace period in seconds for gc. + */ public CassandraValueMergeOperator(final int gcGracePeriodInSeconds) { super(newSharedCassandraValueMergeOperator(gcGracePeriodInSeconds, 0)); } + /** + * Constructs a new CassandraValueMergeOperator. + * + * @param gcGracePeriodInSeconds the grace period in seconds for gc. + * @param operandsLimit the maximum size of the operands list before merge is applied. + */ public CassandraValueMergeOperator(final int gcGracePeriodInSeconds, final int operandsLimit) { super(newSharedCassandraValueMergeOperator(gcGracePeriodInSeconds, operandsLimit)); } diff --git a/java/src/main/java/org/rocksdb/Checkpoint.java b/java/src/main/java/org/rocksdb/Checkpoint.java index e50068a6e32c..4cdfe0ff20e7 100644 --- a/java/src/main/java/org/rocksdb/Checkpoint.java +++ b/java/src/main/java/org/rocksdb/Checkpoint.java @@ -50,6 +50,22 @@ public void createCheckpoint(final String checkpointPath) createCheckpoint(nativeHandle_, checkpointPath); } + /** + * Exports all live SST files of a specified Column Family into {@code exportPath}. + *

+ * Always triggers a flush. + * + * @param columnFamilyHandle the column family to export. + * + * @param exportPath should not already exist and will be created by this API. + * SST files will be created as hard links when the directory specified + * is in the same partition as the db directory, copied otherwise. + * + * @return metadata about the exported SST files. + * + * @throws RocksDBException thrown if an error occurs within the native + * part of the library. + */ public ExportImportFilesMetaData exportColumnFamily(final ColumnFamilyHandle columnFamilyHandle, final String exportPath) throws RocksDBException { return new ExportImportFilesMetaData( diff --git a/java/src/main/java/org/rocksdb/ChecksumType.java b/java/src/main/java/org/rocksdb/ChecksumType.java index 5b3d2249250f..556220f8baa5 100644 --- a/java/src/main/java/org/rocksdb/ChecksumType.java +++ b/java/src/main/java/org/rocksdb/ChecksumType.java @@ -14,18 +14,20 @@ public enum ChecksumType { */ kNoChecksum((byte) 0), /** - * CRC32 Checksum + * CRC32 Checksum. */ kCRC32c((byte) 1), /** - * XX Hash + * XX Hash. */ kxxHash((byte) 2), /** - * XX Hash 64 + * XX Hash 64. */ kxxHash64((byte) 3), - + /** + * XX Hash v3. + */ kXXH3((byte) 4); /** diff --git a/java/src/main/java/org/rocksdb/ClockCache.java b/java/src/main/java/org/rocksdb/ClockCache.java index afbd7f75532c..171f5596a90f 100644 --- a/java/src/main/java/org/rocksdb/ClockCache.java +++ b/java/src/main/java/org/rocksdb/ClockCache.java @@ -14,6 +14,7 @@ * configuration parameter that is not provided by this API. This function * simply returns a new LRUCache for functional compatibility. */ +@Deprecated public class ClockCache extends Cache { /** * Create a new cache with a fixed size capacity. 
@@ -22,6 +23,7 @@ public class ClockCache extends Cache { * * @param capacity The fixed size capacity of the cache */ + @Deprecated public ClockCache(final long capacity) { super(newClockCache(capacity, -1, false)); } @@ -39,6 +41,7 @@ public ClockCache(final long capacity) { * @param numShardBits The cache is sharded to 2^numShardBits shards, * by hash of the key */ + @Deprecated public ClockCache(final long capacity, final int numShardBits) { super(newClockCache(capacity, numShardBits, false)); } @@ -58,6 +61,7 @@ public ClockCache(final long capacity, final int numShardBits) { * by hash of the key * @param strictCapacityLimit insert to the cache will fail when cache is full */ + @Deprecated public ClockCache(final long capacity, final int numShardBits, final boolean strictCapacityLimit) { super(newClockCache(capacity, numShardBits, strictCapacityLimit)); diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java b/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java index 00bff0b07307..054d35adf23d 100644 --- a/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java +++ b/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java @@ -123,6 +123,11 @@ public int hashCode() { } } + /** + * Returns true if this is the handle for the default column family. + * + * @return true if this is the handle for the default column family, false otherwise. + */ protected boolean isDefaultColumnFamily() { return nativeHandle_ == rocksDB_.getDefaultColumnFamily().nativeHandle_; } diff --git a/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java index 4776773bd8bd..40c7c5806409 100644 --- a/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java @@ -8,6 +8,11 @@ import java.util.Collection; import java.util.List; +/** + * Interface for Column Family Options. + * + * @param the concrete type of the ColumnFamilyOptions. 
+ */ public interface ColumnFamilyOptionsInterface> extends AdvancedColumnFamilyOptionsInterface { /** diff --git a/java/src/main/java/org/rocksdb/CompactRangeOptions.java b/java/src/main/java/org/rocksdb/CompactRangeOptions.java index ba5fa6455d27..710c9614caa7 100644 --- a/java/src/main/java/org/rocksdb/CompactRangeOptions.java +++ b/java/src/main/java/org/rocksdb/CompactRangeOptions.java @@ -17,9 +17,11 @@ public class CompactRangeOptions extends RocksObject { private static final byte VALUE_kForce = 2; private static final byte VALUE_kForceOptimized = 3; - // For level based compaction, we can configure if we want to skip/force bottommost level - // compaction. The order of this enum MUST follow the C++ layer. See BottommostLevelCompaction in - // db/options.h + /** + * For level based compaction, we can configure if we want to skip/force bottommost level + * compaction. The order of this enum MUST follow the C++ layer. See BottommostLevelCompaction in + * db/options.h + */ public enum BottommostLevelCompaction { /** * Skip bottommost level compaction @@ -71,15 +73,34 @@ public static BottommostLevelCompaction fromRocksId(final int bottommostLevelCom } } + /** + * Timestamp. + */ public static class Timestamp { + /** + * the start. + */ public final long start; + + /** + * the range. + */ public final long range; - public Timestamp(final long start, final long duration) { + /** + * Constructs a Timestamp. + * + * @param start the start. + * @param range the range. + */ + public Timestamp(final long start, final long range) { this.start = start; - this.range = duration; + this.range = range; } + /** + * Constructs a Timestamp. + */ public Timestamp() { this.start = 0; this.range = 0; @@ -250,22 +271,46 @@ public CompactRangeOptions setMaxSubcompactions(final int maxSubcompactions) { return this; } + /** + * Set Full History Low Timestamp; + * + * @param tsLow low timestamp. + * + * @return This CompactRangeOptions. 
+ */ public CompactRangeOptions setFullHistoryTSLow(final Timestamp tsLow) { setFullHistoryTSLow(nativeHandle_, tsLow.start, tsLow.range); return this; } + /** + * Get the Full History Low Timestamp. + * + * @return low timestamp. + */ public Timestamp fullHistoryTSLow() { return fullHistoryTSLow(nativeHandle_); } - public CompactRangeOptions setCanceled(final boolean canceled) { - setCanceled(nativeHandle_, canceled); + /** + * Set cancelled. + * + * @param cancelled true to cancel, otherwise false. + * + * @return This CompactRangeOptions. + */ + public CompactRangeOptions setCancelled(final boolean cancelled) { + setCancelled(nativeHandle_, cancelled); return this; } - public boolean canceled() { - return canceled(nativeHandle_); + /** + * Get the cancelled status. + * + * @return true if cancelled, false otherwise. + */ + public boolean cancelled() { + return cancelled(nativeHandle_); } private static native long newCompactRangeOptions(); @@ -297,7 +342,7 @@ private static native void setFullHistoryTSLow( private static native Timestamp fullHistoryTSLow(final long handle); - private static native void setCancelled(final long handle, final boolean cancelled); + private static native boolean cancelled(final long handle); } diff --git a/java/src/main/java/org/rocksdb/CompactionJobInfo.java index 29369f174a1a..309ca53067fa 100644 --- a/java/src/main/java/org/rocksdb/CompactionJobInfo.java +++ b/java/src/main/java/org/rocksdb/CompactionJobInfo.java @@ -9,8 +9,14 @@ import java.util.List; import java.util.Map; +/** + * Information about a Compaction Job. + */ public class CompactionJobInfo extends RocksObject { + /** + * Constructs a new CompactionJobInfo. 
+ */ public CompactionJobInfo() { super(newCompactionJobInfo()); } diff --git a/java/src/main/java/org/rocksdb/CompactionJobStats.java b/java/src/main/java/org/rocksdb/CompactionJobStats.java index 857de7b62430..fb5c950ee859 100644 --- a/java/src/main/java/org/rocksdb/CompactionJobStats.java +++ b/java/src/main/java/org/rocksdb/CompactionJobStats.java @@ -5,8 +5,14 @@ package org.rocksdb; +/** + * Statistics about a Compaction Job. + */ public class CompactionJobStats extends RocksObject { + /** + * Constructs a new CompactionJobStats. + */ public CompactionJobStats() { super(newCompactionJobStats()); } @@ -118,7 +124,7 @@ public long totalOutputBytes() { /** * Get the number of records being replaced by newer record associated * with same key. - * + *

* This could be a new value or a deletion entry for that key so this field * sums up all updated and deleted keys. * @@ -149,7 +155,7 @@ public long totalInputRawValueBytes() { /** * Get the number of deletion entries before compaction. - * + *

* Deletion entries can disappear after compaction because they expired. * * @return the number of deletion entries before compaction. @@ -182,7 +188,7 @@ public long numCorruptKeys() { /** * Get the Time spent on file's Append() call. - * + *

* Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set. * * @return the Time spent on file's Append() call. @@ -193,7 +199,7 @@ public long fileWriteNanos() { /** * Get the Time spent on sync file range. - * + *

* Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set. * * @return the Time spent on sync file range. @@ -204,7 +210,7 @@ public long fileRangeSyncNanos() { /** * Get the Time spent on file fsync. - * + *

* Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set. * * @return the Time spent on file fsync. @@ -215,7 +221,7 @@ public long fileFsyncNanos() { /** * Get the Time spent on preparing file write (falocate, etc) - * + *

* Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set. * * @return the Time spent on preparing file write (falocate, etc). diff --git a/java/src/main/java/org/rocksdb/CompactionOptions.java b/java/src/main/java/org/rocksdb/CompactionOptions.java index 08cbdf6378b3..5cb791806013 100644 --- a/java/src/main/java/org/rocksdb/CompactionOptions.java +++ b/java/src/main/java/org/rocksdb/CompactionOptions.java @@ -14,13 +14,16 @@ */ public class CompactionOptions extends RocksObject { + /** + * Constructs a new CompactionOptions. + */ public CompactionOptions() { super(newCompactionOptions()); } /** * Get the compaction output compression type. - * + *

* See {@link #setCompression(CompressionType)}. * * @return the compression type. @@ -32,9 +35,9 @@ public CompressionType compression() { /** * Set the compaction output compression type. - * + *

* Default: snappy - * + *

* If set to {@link CompressionType#DISABLE_COMPRESSION_OPTION}, * RocksDB will choose compression type according to the * {@link ColumnFamilyOptions#compressionType()}, taking into account @@ -52,7 +55,7 @@ public CompactionOptions setCompression(final CompressionType compression) { /** * Get the compaction output file size limit. - * + *

* See {@link #setOutputFileSizeLimit(long)}. * * @return the file size limit. @@ -63,7 +66,7 @@ public long outputFileSizeLimit() { /** * Compaction will create files of size {@link #outputFileSizeLimit()}. - * + *

* Default: 2^64-1, which means that compaction will create a single file * * @param outputFileSizeLimit the size limit @@ -90,9 +93,9 @@ public int maxSubcompactions() { * This value represents the maximum number of threads that will * concurrently perform a compaction job by breaking it into multiple, * smaller ones that are run simultaneously. - * + *

* Default: 0 (i.e. no subcompactions) - * + *

* If > 0, it will replace the option in * {@link DBOptions#maxSubcompactions()} for this compaction. * diff --git a/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java b/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java index 24ebe0da2ff1..4d359dd814a2 100644 --- a/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java +++ b/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java @@ -10,6 +10,9 @@ */ public class CompactionOptionsFIFO extends RocksObject { + /** + * Constructs a new CompactionOptionsFIFO. + */ public CompactionOptionsFIFO() { super(newCompactionOptionsFIFO()); } diff --git a/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java b/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java index f18915b8f569..191234c4c649 100644 --- a/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java +++ b/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java @@ -10,6 +10,9 @@ */ public class CompactionOptionsUniversal extends RocksObject { + /** + * Constructs a new CompactionOptionsUniversal. + */ public CompactionOptionsUniversal() { super(newCompactionOptionsUniversal()); } diff --git a/java/src/main/java/org/rocksdb/CompactionReason.java b/java/src/main/java/org/rocksdb/CompactionReason.java index 46ec33f3f141..a6050c716a18 100644 --- a/java/src/main/java/org/rocksdb/CompactionReason.java +++ b/java/src/main/java/org/rocksdb/CompactionReason.java @@ -5,7 +5,14 @@ package org.rocksdb; +/** + * Reasons for compaction. + */ public enum CompactionReason { + + /** + * Unknown. + */ kUnknown((byte)0x0), /** diff --git a/java/src/main/java/org/rocksdb/CompactionStyle.java b/java/src/main/java/org/rocksdb/CompactionStyle.java index 7b955a7a248c..6a1de336abfb 100644 --- a/java/src/main/java/org/rocksdb/CompactionStyle.java +++ b/java/src/main/java/org/rocksdb/CompactionStyle.java @@ -35,9 +35,24 @@ * FIFO Compaction */ public enum CompactionStyle { + /** + * Level Compaction. 
+ */ LEVEL((byte) 0x0), + + /** + * Universal Compaction. + */ UNIVERSAL((byte) 0x1), + + /** + * First-in First-out Compaction. + */ FIFO((byte) 0x2), + + /** + * No compaction. + */ NONE((byte) 0x3); private final byte value; diff --git a/java/src/main/java/org/rocksdb/ComparatorOptions.java b/java/src/main/java/org/rocksdb/ComparatorOptions.java index da287b51816b..3247a85601a5 100644 --- a/java/src/main/java/org/rocksdb/ComparatorOptions.java +++ b/java/src/main/java/org/rocksdb/ComparatorOptions.java @@ -13,6 +13,9 @@ * instance becomes out-of-scope to release the allocated memory in C++. */ public class ComparatorOptions extends RocksObject { + /** + * Constructs a new ComparatorOptions. + */ public ComparatorOptions() { super(newComparatorOptions()); } diff --git a/java/src/main/java/org/rocksdb/CompressionOptions.java b/java/src/main/java/org/rocksdb/CompressionOptions.java index e6316af451e6..53f1480cda08 100644 --- a/java/src/main/java/org/rocksdb/CompressionOptions.java +++ b/java/src/main/java/org/rocksdb/CompressionOptions.java @@ -10,33 +10,93 @@ */ public class CompressionOptions extends RocksObject { + /** + * RocksDB's generic default compression level. Internally it'll be translated + * to the default compression level specific to the library being used. + */ + public static final int DEFAULT_COMPRESSION_LEVEL = 32767; + + /** + * Constructs a new CompressionOptions. + */ public CompressionOptions() { super(newCompressionOptions()); } + /** + * Set the Window size. + * Zlib only. + * + * @param windowBits the size of the window. + * + * @return the reference to the current compression options. + */ public CompressionOptions setWindowBits(final int windowBits) { setWindowBits(nativeHandle_, windowBits); return this; } + /** + * Get the Window size. + * Zlib only. + * + * @return the size of the window. + */ public int windowBits() { return windowBits(nativeHandle_); } + /** + * Compression "level" applicable to zstd, zlib, LZ4, and LZ4HC. 
Except for + * {@link #DEFAULT_COMPRESSION_LEVEL}, the meaning of each value depends + * on the compression algorithm. Decreasing across non- + * {@link #DEFAULT_COMPRESSION_LEVEL} values will either favor speed over + * compression ratio or have no effect. + *

+ * In LZ4 specifically, the absolute value of a negative `level` internally + * configures the `acceleration` parameter. For example, set `level=-10` for + * `acceleration=10`. This negation is necessary to ensure decreasing `level` + * values favor speed over compression ratio. + * + * @param level the compression level. + * + * @return the reference to the current compression options. + */ public CompressionOptions setLevel(final int level) { setLevel(nativeHandle_, level); return this; } + /** + * Get the Compression "level". + *

+ * See {@link #setLevel(int)} + * + * @return the compression level. + */ public int level() { return level(nativeHandle_); } + /** + * Set the compression strategy. + * Zlib only. + * + * @param strategy the strategy. + * + * @return the reference to the current compression options. + */ public CompressionOptions setStrategy(final int strategy) { setStrategy(nativeHandle_, strategy); return this; } + /** + * Get the compression strategy. + * Zlib only. + * + * @return the strategy. + */ public int strategy() { return strategy(nativeHandle_); } diff --git a/java/src/main/java/org/rocksdb/CompressionType.java b/java/src/main/java/org/rocksdb/CompressionType.java index d1ecf0ac84c5..4f683d036735 100644 --- a/java/src/main/java/org/rocksdb/CompressionType.java +++ b/java/src/main/java/org/rocksdb/CompressionType.java @@ -14,14 +14,49 @@ * compression method (if any) is used to compress a block.

*/ public enum CompressionType { + /** + * No compression. + */ NO_COMPRESSION((byte) 0x0, null, "kNoCompression"), + + /** + * Snappy compression. + */ SNAPPY_COMPRESSION((byte) 0x1, "snappy", "kSnappyCompression"), + + /** + * ZLib compression. + */ ZLIB_COMPRESSION((byte) 0x2, "z", "kZlibCompression"), + + /** + * BZ2 compression. + */ BZLIB2_COMPRESSION((byte) 0x3, "bzip2", "kBZip2Compression"), + + /** + * LZ4 compression. + */ LZ4_COMPRESSION((byte) 0x4, "lz4", "kLZ4Compression"), + + /** + * LZ4 with high compression. + */ LZ4HC_COMPRESSION((byte) 0x5, "lz4hc", "kLZ4HCCompression"), + + /** + * Microsoft XPress compression (Windows only). + */ XPRESS_COMPRESSION((byte) 0x6, "xpress", "kXpressCompression"), + + /** + * ZStd compression. + */ ZSTD_COMPRESSION((byte) 0x7, "zstd", "kZSTD"), + + /** + * Disable compression. + */ DISABLE_COMPRESSION_OPTION((byte) 0x7F, null, "kDisableCompressionOption"); /** diff --git a/java/src/main/java/org/rocksdb/ConcurrentTaskLimiter.java b/java/src/main/java/org/rocksdb/ConcurrentTaskLimiter.java index b4e34303b5f3..33bbcd5ba41e 100644 --- a/java/src/main/java/org/rocksdb/ConcurrentTaskLimiter.java +++ b/java/src/main/java/org/rocksdb/ConcurrentTaskLimiter.java @@ -6,7 +6,15 @@ package org.rocksdb; +/** + * Base class for Concurrent Task Limiters. + */ public abstract class ConcurrentTaskLimiter extends RocksObject { + /** + * Constructs a ConcurrentTaskLimiter. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ concurrent task limiter object. 
+ */ protected ConcurrentTaskLimiter(final long nativeHandle) { super(nativeHandle); } diff --git a/java/src/main/java/org/rocksdb/ConcurrentTaskLimiterImpl.java b/java/src/main/java/org/rocksdb/ConcurrentTaskLimiterImpl.java index 1c496ff2f5c8..145eef11bba6 100644 --- a/java/src/main/java/org/rocksdb/ConcurrentTaskLimiterImpl.java +++ b/java/src/main/java/org/rocksdb/ConcurrentTaskLimiterImpl.java @@ -6,7 +6,17 @@ package org.rocksdb; +/** + * Concurrent Task Limiter. + */ public class ConcurrentTaskLimiterImpl extends ConcurrentTaskLimiter { + + /** + * Construct a new Concurrent Task Limiter. + * + * @param name the name of the limiter. + * @param maxOutstandingTask the maximum concurrent tasks. + */ public ConcurrentTaskLimiterImpl(final String name, final int maxOutstandingTask) { super(newConcurrentTaskLimiterImpl0(name, maxOutstandingTask)); } diff --git a/java/src/main/java/org/rocksdb/ConfigOptions.java b/java/src/main/java/org/rocksdb/ConfigOptions.java index 4717750b7f4b..c846f6892854 100644 --- a/java/src/main/java/org/rocksdb/ConfigOptions.java +++ b/java/src/main/java/org/rocksdb/ConfigOptions.java @@ -6,33 +6,72 @@ package org.rocksdb; +/** + * Configuration options. + */ public class ConfigOptions extends RocksObject { /** - * Construct with default Options + * Constructs a new ConfigOptions. */ public ConfigOptions() { super(newConfigOptionsInstance()); } + /** + * Set the delimiter used between options. + * + * @param delimiter the delimiter + * + * @return the reference to the current options + */ public ConfigOptions setDelimiter(final String delimiter) { setDelimiter(nativeHandle_, delimiter); return this; } + + /** + * Set whether to ignore unknown options. + * + * @param ignore true to ignore unknown options, otherwise raise an error. 
+ * + * @return the reference to the current options + */ public ConfigOptions setIgnoreUnknownOptions(final boolean ignore) { setIgnoreUnknownOptions(nativeHandle_, ignore); return this; } + /** + * Set the environment. + * + * @param env the environment. + * + * @return the reference to the current options + */ public ConfigOptions setEnv(final Env env) { setEnv(nativeHandle_, env.nativeHandle_); return this; } + /** + * Set whether to escape input strings. + * + * @param escaped true to escape input strings, false otherwise. + * + * @return the reference to the current options + */ public ConfigOptions setInputStringsEscaped(final boolean escaped) { setInputStringsEscaped(nativeHandle_, escaped); return this; } + /** + * Set the sanity level. + * + * @param level the sanity level. + * + * @return the reference to the current options + */ public ConfigOptions setSanityLevel(final SanityLevel level) { setSanityLevel(nativeHandle_, level.getValue()); return this; diff --git a/java/src/main/java/org/rocksdb/DBOptionsInterface.java b/java/src/main/java/org/rocksdb/DBOptionsInterface.java index bc9d9acbd65e..f7a915eeabde 100644 --- a/java/src/main/java/org/rocksdb/DBOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/DBOptionsInterface.java @@ -8,6 +8,11 @@ import java.util.Collection; import java.util.List; +/** + * Interface for DB Options. + * + * @param the concrete type of DBOptions. + */ public interface DBOptionsInterface> { /** * Use this if your DB is very small (like under 1GB) and you don't want to @@ -78,8 +83,7 @@ public interface DBOptionsInterface> { * * @param flag a flag indicating if missing column families shall be * created automatically. - * @return true if missing column families shall be created automatically - * on open. 
+ * @return the instance of the current Options */ T setCreateMissingColumnFamilies(boolean flag); @@ -159,7 +163,7 @@ public interface DBOptionsInterface> { /** * Use to track SST files and control their file deletion rate. - * + *

* Features: * - Throttle the deletion rate of the SST files. * - Keep track the total size of all SST files. @@ -167,7 +171,7 @@ public interface DBOptionsInterface> { * the DB wont do any further flushes or compactions and will set the * background error. * - Can be shared between multiple dbs. - * + *

* Limitations: * - Only track and throttle deletes of SST files in * first db_path (db_name if db_paths is empty). @@ -208,7 +212,7 @@ public interface DBOptionsInterface> { * If {@link MutableDBOptionsInterface#maxOpenFiles()} is -1, DB will open * all files on DB::Open(). You can use this option to increase the number * of threads used to open the files. - * + *

* Default: 16 * * @param maxFileOpeningThreads the maximum number of threads to use to @@ -222,7 +226,7 @@ public interface DBOptionsInterface> { * If {@link MutableDBOptionsInterface#maxOpenFiles()} is -1, DB will open all * files on DB::Open(). You can use this option to increase the number of * threads used to open the files. - * + *

* Default: 16 * * @return the maximum number of threads to use to open files @@ -278,27 +282,27 @@ public interface DBOptionsInterface> { * A list of paths where SST files can be put into, with its target size. * Newer data is placed into paths specified earlier in the vector while * older data gradually moves to paths specified later in the vector. - * + *

* For example, you have a flash device with 10GB allocated for the DB, * as well as a hard drive of 2TB, you should config it to be: * [{"/flash_path", 10GB}, {"/hard_drive", 2TB}] - * + *

* The system will try to guarantee data under each path is close to but * not larger than the target size. But current and future file sizes used * by determining where to place a file are based on best-effort estimation, * which means there is a chance that the actual size under the directory * is slightly more than target size under some workloads. User should give * some buffer room for those cases. - * + *

* If none of the paths has sufficient room to place a file, the file will * be placed to the last path anyway, despite to the target size. - * + *

* Placing newer data to earlier paths is also best-efforts. User should * expect user files to be placed in higher levels in some extreme cases. - * + *

* If left empty, only one path will be used, which is db_name passed when * opening the DB. - * + *

* Default: empty * * @param dbPaths the paths and target sizes @@ -311,27 +315,27 @@ public interface DBOptionsInterface> { * A list of paths where SST files can be put into, with its target size. * Newer data is placed into paths specified earlier in the vector while * older data gradually moves to paths specified later in the vector. - * + *

* For example, you have a flash device with 10GB allocated for the DB, * as well as a hard drive of 2TB, you should config it to be: * [{"/flash_path", 10GB}, {"/hard_drive", 2TB}] - * + *

* The system will try to guarantee data under each path is close to but * not larger than the target size. But current and future file sizes used * by determining where to place a file are based on best-effort estimation, * which means there is a chance that the actual size under the directory * is slightly more than target size under some workloads. User should give * some buffer room for those cases. - * + *

* If none of the paths has sufficient room to place a file, the file will * be placed to the last path anyway, despite to the target size. - * + *

* Placing newer data to earlier paths is also best-efforts. User should * expect user files to be placed in higher levels in some extreme cases. - * + *

* If left empty, only one path will be used, which is db_name passed when * opening the DB. - * + *

* Default: {@link java.util.Collections#emptyList()} * * @return dbPaths the paths and target sizes @@ -352,7 +356,7 @@ public interface DBOptionsInterface> { /** * Returns the directory of info log. - * + *

* If it is empty, the log files will be in the same dir as data. * If it is non empty, the log files will be in the specified dir, * and the db data dir's absolute path will be used as the log file @@ -377,7 +381,7 @@ public interface DBOptionsInterface> { /** * Returns the path to the write-ahead-logs (WAL) directory. - * + *

* If it is empty, the log files will be in the same dir as data, * dbname is used as the data dir by default * If it is non empty, the log files will be in kept the specified dir. @@ -439,7 +443,7 @@ public interface DBOptionsInterface> { * `max_background_jobs = max_background_compactions + max_background_flushes` * in the case where user sets at least one of `max_background_compactions` or * `max_background_flushes`. - * + *

* Specifies the maximum number of concurrent background flush jobs. * If you're increasing this, also consider increasing number of threads in * HIGH priority thread pool. For more information, see @@ -463,7 +467,7 @@ public interface DBOptionsInterface> { * `max_background_jobs = max_background_compactions + max_background_flushes` * in the case where user sets at least one of `max_background_compactions` or * `max_background_flushes`. - * + *

* Returns the maximum number of concurrent background flush jobs. * If you're increasing this, also consider increasing number of threads in * HIGH priority thread pool. For more information, see @@ -542,16 +546,16 @@ public interface DBOptionsInterface> { /** * Recycle log files. - * + *

* If non-zero, we will reuse previously written log files for new * logs, overwriting the old data. The value indicates how many * such files we will keep around at any point in time for later * use. - * + *

* This is more efficient because the blocks are already * allocated and fdatasync does not need to update the inode after * each write. - * + *

* Default: 0 * * @param recycleLogFileNum the number of log files to keep for recycling @@ -562,16 +566,16 @@ public interface DBOptionsInterface> { /** * Recycle log files. - * + *

* If non-zero, we will reuse previously written log files for new * logs, overwriting the old data. The value indicates how many * such files we will keep around at any point in time for later * use. - * + *

* This is more efficient because the blocks are already * allocated and fdatasync does not need to update the inode after * each write. - * + *

* Default: 0 * * @return the number of log files kept for recycling @@ -617,17 +621,17 @@ public interface DBOptionsInterface> { /** * {@link #walTtlSeconds()} and {@link #walSizeLimitMB()} affect when WALs * will be archived and deleted. - * + *

* When both are zero, obsolete WALs will not be archived and will be deleted * immediately. Otherwise, obsolete WALs will be archived prior to deletion. - * + *

* When `WAL_size_limit_MB` is nonzero, archived WALs starting with the * earliest will be deleted until the total size of the archive falls below * this limit. All empty WALs will be deleted. - * + *

* When `WAL_ttl_seconds` is nonzero, archived WALs older than * `WAL_ttl_seconds` will be deleted. - * + *

* When only `WAL_ttl_seconds` is nonzero, the frequency at which archived * WALs are deleted is every `WAL_ttl_seconds / 2` seconds. When only * `WAL_size_limit_MB` is nonzero, the deletion frequency is every ten @@ -643,17 +647,17 @@ public interface DBOptionsInterface> { /** * WalTtlSeconds() and walSizeLimitMB() affect when WALs will be archived and * deleted. - * + *

* When both are zero, obsolete WALs will not be archived and will be deleted * immediately. Otherwise, obsolete WALs will be archived prior to deletion. - * + *

* When `WAL_size_limit_MB` is nonzero, archived WALs starting with the * earliest will be deleted until the total size of the archive falls below * this limit. All empty WALs will be deleted. - * + *

* When `WAL_ttl_seconds` is nonzero, archived WALs older than * `WAL_ttl_seconds` will be deleted. - * + *

* When only `WAL_ttl_seconds` is nonzero, the frequency at which archived * WALs are deleted is every `WAL_ttl_seconds / 2` seconds. When only * `WAL_size_limit_MB` is nonzero, the deletion frequency is every ten @@ -668,17 +672,17 @@ public interface DBOptionsInterface> { /** * WalTtlSeconds() and walSizeLimitMB() affect how archived logs * will be deleted. - * + *

* When both are zero, obsolete WALs will not be archived and will be deleted * immediately. Otherwise, obsolete WALs will be archived prior to deletion. - * + *

* When `WAL_size_limit_MB` is nonzero, archived WALs starting with the * earliest will be deleted until the total size of the archive falls below * this limit. All empty WALs will be deleted. - * + *

* When `WAL_ttl_seconds` is nonzero, archived WALs older than * `WAL_ttl_seconds` will be deleted. - * + *

* When only `WAL_ttl_seconds` is nonzero, the frequency at which archived * WALs are deleted is every `WAL_ttl_seconds / 2` seconds. When only * `WAL_size_limit_MB` is nonzero, the deletion frequency is every ten @@ -694,17 +698,17 @@ public interface DBOptionsInterface> { /** * WalTtlSeconds() and walSizeLimitMB() affect when WALs will be archived and * deleted. - * + *

* When both are zero, obsolete WALs will not be archived and will be deleted * immediately. Otherwise, obsolete WALs will be archived prior to deletion. - * + *

* When `WAL_size_limit_MB` is nonzero, archived WALs starting with the * earliest will be deleted until the total size of the archive falls below * this limit. All empty WALs will be deleted. - * + *

* When `WAL_ttl_seconds` is nonzero, archived WALs older than * `WAL_ttl_seconds` will be deleted. - * + *

* When only `WAL_ttl_seconds` is nonzero, the frequency at which archived * WALs are deleted is every `WAL_ttl_seconds / 2` seconds. When only * `WAL_size_limit_MB` is nonzero, the deletion frequency is every ten @@ -720,7 +724,7 @@ public interface DBOptionsInterface> { * The maximum limit of number of bytes that are written in a single batch * of WAL or memtable write. It is followed when the leader write size * is larger than 1/8 of this limit. - * + *

* Default: 1 MB * * @param maxWriteBatchGroupSizeBytes the maximum limit of number of bytes, see description. @@ -732,7 +736,7 @@ public interface DBOptionsInterface> { * The maximum limit of number of bytes that are written in a single batch * of WAL or memtable write. It is followed when the leader write size * is larger than 1/8 of this limit. - * + *

* Default: 1 MB * * @return the maximum limit of number of bytes, see description. @@ -885,13 +889,13 @@ public interface DBOptionsInterface> { /** * Amount of data to build up in memtables across all column * families before writing to disk. - * + *

* This is distinct from {@link ColumnFamilyOptions#writeBufferSize()}, * which enforces a limit for a single memtable. - * + *

* This feature is disabled by default. Specify a non-zero value * to enable it. - * + *

* Default: 0 (disabled) * * @param dbWriteBufferSize the size of the write buffer @@ -903,7 +907,7 @@ public interface DBOptionsInterface> { /** * Use passed {@link WriteBufferManager} to control memory usage across * multiple column families and/or DB instances. - * + *

* Check * https://github.com/facebook/rocksdb/wiki/Write-Buffer-Manager * for more details on when to use it @@ -925,13 +929,13 @@ public interface DBOptionsInterface> { /** * Amount of data to build up in memtables across all column * families before writing to disk. - * + *

* This is distinct from {@link ColumnFamilyOptions#writeBufferSize()}, * which enforces a limit for a single memtable. - * + *

* This feature is disabled by default. Specify a non-zero value * to enable it. - * + *

* Default: 0 (disabled) * * @return the size of the write buffer @@ -964,7 +968,7 @@ public interface DBOptionsInterface> { /** * Sets the {@link EventListener}s whose callback functions * will be called when specific RocksDB event happens. - * + *

* Note: the RocksJava API currently only supports EventListeners implemented in Java. * It could be extended in future to also support adding/removing EventListeners implemented in * C++. @@ -978,7 +982,7 @@ public interface DBOptionsInterface> { /** * Sets the {@link EventListener}s whose callback functions * will be called when specific RocksDB event happens. - * + *

* Note: the RocksJava API currently only supports EventListeners implemented in Java. * It could be extended in future to also support adding/removing EventListeners implemented in * C++. @@ -990,7 +994,7 @@ public interface DBOptionsInterface> { /** * If true, then the status of the threads involved in this DB will * be tracked and available via GetThreadList() API. - * + *

* Default: false * * @param enableThreadTracking true to enable tracking @@ -1002,7 +1006,7 @@ public interface DBOptionsInterface> { /** * If true, then the status of the threads involved in this DB will * be tracked and available via GetThreadList() API. - * + *

* Default: false * * @return true if tracking is enabled @@ -1013,7 +1017,7 @@ public interface DBOptionsInterface> { * By default, a single write thread queue is maintained. The thread gets * to the head of the queue becomes write batch group leader and responsible * for writing to WAL and memtable for the batch group. - * + *

* If {@link #enablePipelinedWrite()} is true, separate write thread queue is * maintained for WAL write and memtable write. A write thread first enter WAL * writer queue and then memtable writer queue. Pending thread on the WAL @@ -1021,7 +1025,7 @@ public interface DBOptionsInterface> { * WAL writing but not the memtable writing. Enabling the feature may improve * write throughput and reduce latency of the prepare phase of two-phase * commit. - * + *

* Default: false * * @param enablePipelinedWrite true to enabled pipelined writes @@ -1048,7 +1052,7 @@ public interface DBOptionsInterface> { * throughput. Using TransactionDB with WRITE_PREPARED write policy and * {@link #twoWriteQueues()} true is one way to achieve immutable snapshots despite * unordered_write. - * + *

* By default, i.e., when it is false, rocksdb does not advance the sequence * number for new snapshots unless all the writes with lower sequence numbers * are already finished. This provides the immutability that we except from @@ -1193,7 +1197,7 @@ T setEnableWriteThreadAdaptiveYield( * compaction decision by loading table properties from many files. * Turning off this feature will improve DBOpen time especially in * disk environment. - * + *

* Default: false * * @param skipStatsUpdateOnDbOpen true if updating stats will be skipped @@ -1207,7 +1211,7 @@ T setEnableWriteThreadAdaptiveYield( * compaction decision by loading table properties from many files. * Turning off this feature will improve DBOpen time especially in * disk environment. - * + *

* Default: false * * @return true if updating stats will be skipped @@ -1221,7 +1225,7 @@ T setEnableWriteThreadAdaptiveYield( * We'll still check that all required sst files exist. * If {@code paranoid_checks} is false, this option is ignored, and sst files are * not checked at all. - * + *

* Default: false * * @param skipCheckingSstFileSizesOnDbOpen if true, then SST file sizes will not be checked @@ -1237,7 +1241,7 @@ T setEnableWriteThreadAdaptiveYield( * We'll still check that all required sst files exist. * If {@code paranoid_checks} is false, this option is ignored, and sst files are * not checked at all. - * + *

* Default: false * * @return true, if file sizes will not be checked when calling {@link RocksDB#open(String)}. @@ -1246,7 +1250,7 @@ T setEnableWriteThreadAdaptiveYield( /** * Recovery mode to control the consistency while replaying WAL - * + *

* Default: {@link WALRecoveryMode#PointInTimeRecovery} * * @param walRecoveryMode The WAL recover mode @@ -1257,7 +1261,7 @@ T setEnableWriteThreadAdaptiveYield( /** * Recovery mode to control the consistency while replaying WAL - * + *

* Default: {@link WALRecoveryMode#PointInTimeRecovery} * * @return The WAL recover mode @@ -1267,7 +1271,7 @@ T setEnableWriteThreadAdaptiveYield( /** * if set to false then recovery will fail when a prepared * transaction is encountered in the WAL - * + *

* Default: false * * @param allow2pc true if two-phase-commit is enabled @@ -1279,7 +1283,7 @@ T setEnableWriteThreadAdaptiveYield( /** * if set to false then recovery will fail when a prepared * transaction is encountered in the WAL - * + *

* Default: false * * @return true if two-phase-commit is enabled @@ -1288,7 +1292,7 @@ T setEnableWriteThreadAdaptiveYield( /** * A global cache for table-level rows. - * + *

* Default: null (disabled) * * @param rowCache The global row cache @@ -1299,7 +1303,7 @@ T setEnableWriteThreadAdaptiveYield( /** * A global cache for table-level rows. - * + *

* Default: null (disabled) * * @return The global row cache @@ -1331,7 +1335,7 @@ T setEnableWriteThreadAdaptiveYield( * If true, then DB::Open / CreateColumnFamily / DropColumnFamily * / SetOptions will fail if options file is not detected or properly * persisted. - * + *

* DEFAULT: false * * @param failIfOptionsFileError true if we should fail if there is an error @@ -1345,7 +1349,7 @@ T setEnableWriteThreadAdaptiveYield( * If true, then DB::Open / CreateColumnFamily / DropColumnFamily * / SetOptions will fail if options file is not detected or properly * persisted. - * + *

* DEFAULT: false * * @return true if we should fail if there is an error in the options file @@ -1355,7 +1359,7 @@ T setEnableWriteThreadAdaptiveYield( /** * If true, then print malloc stats together with rocksdb.stats * when printing to LOG. - * + *

* DEFAULT: false * * @param dumpMallocStats true if malloc stats should be printed to LOG @@ -1367,7 +1371,7 @@ T setEnableWriteThreadAdaptiveYield( /** * If true, then print malloc stats together with rocksdb.stats * when printing to LOG. - * + *

* DEFAULT: false * * @return true if malloc stats should be printed to LOG @@ -1380,7 +1384,7 @@ T setEnableWriteThreadAdaptiveYield( * to avoid (but not guarantee not to) flush during recovery. Also, existing * WAL logs will be kept, so that if crash happened before flush, we still * have logs to recover from. - * + *

* DEFAULT: false * * @param avoidFlushDuringRecovery true to try to avoid (but not guarantee @@ -1396,7 +1400,7 @@ T setEnableWriteThreadAdaptiveYield( * to avoid (but not guarantee not to) flush during recovery. Also, existing * WAL logs will be kept, so that if crash happened before flush, we still * have logs to recover from. - * + *

* DEFAULT: false * * @return true to try to avoid (but not guarantee not to) flush during @@ -1412,7 +1416,7 @@ T setEnableWriteThreadAdaptiveYield( * 1) Disable some internal optimizations around SST file compression * 2) Reserve bottom-most level for ingested files only. * 3) Note that num_levels should be >= 3 if this option is turned on. - * + *

* DEFAULT: false * * @param allowIngestBehind true to allow ingest behind, false to disallow. @@ -1435,7 +1439,7 @@ T setEnableWriteThreadAdaptiveYield( * allows the memtable writes not to lag behind other writes. It can be used * to optimize MySQL 2PC in which only the commits, which are serial, write to * memtable. - * + *

* DEFAULT: false * * @param twoWriteQueues true to enable two write queues, false otherwise. @@ -1455,7 +1459,7 @@ T setEnableWriteThreadAdaptiveYield( * If true WAL is not flushed automatically after each write. Instead it * relies on manual invocation of FlushWAL to write the WAL buffer to its * file. - * + *

* DEFAULT: false * * @param manualWalFlush true to set disable automatic WAL flushing, @@ -1483,7 +1487,7 @@ T setEnableWriteThreadAdaptiveYield( * For manual flush, application has to specify which column families to * flush atomically in {@link RocksDB#flush(FlushOptions, List)}. * For auto-triggered flush, RocksDB atomically flushes ALL column families. - * + *

* Currently, any WAL-enabled writes after atomic flush may be replayed * independently if the process crashes later and tries to recover. * @@ -1495,7 +1499,7 @@ T setEnableWriteThreadAdaptiveYield( /** * Determine if atomic flush of multiple column families is enabled. - * + *

* See {@link #setAtomicFlush(boolean)}. * * @return true if atomic flush is enabled. @@ -1596,7 +1600,7 @@ T setEnableWriteThreadAdaptiveYield( * The number of bytes to prefetch when reading the log. This is mostly useful * for reading a remotely located log, as it can save the number of * round-trips. If 0, then the prefetching is disabled. - * + *

* Default: 0 * * @param logReadaheadSize the number of bytes to prefetch when reading the log. @@ -1608,7 +1612,7 @@ T setEnableWriteThreadAdaptiveYield( * The number of bytes to prefetch when reading the log. This is mostly useful * for reading a remotely located log, as it can save the number of * round-trips. If 0, then the prefetching is disabled. - * + *

* Default: 0 * * @return the number of bytes to prefetch when reading the log. @@ -1651,7 +1655,7 @@ T setEnableWriteThreadAdaptiveYield( * can be auto-recovered (e.g., retryable IO Error during Flush or WAL write), * then db resume is called in background to recover from the error. If this * value is 0 or negative, db resume will not be called. - * + *

* Default: INT_MAX * * @param maxBgerrorResumeCount maximum number of times db resume should be called when IO Error @@ -1667,7 +1671,7 @@ T setEnableWriteThreadAdaptiveYield( * can be auto-recovered (e.g., retryable IO Error during Flush or WAL write), * then db resume is called in background to recover from the error. If this * value is 0 or negative, db resume will not be called. - * + *

* Default: INT_MAX * * @return maximum number of times db resume should be called when IO Error happens. @@ -1678,7 +1682,7 @@ T setEnableWriteThreadAdaptiveYield( * If max_bgerror_resume_count is ≥ 2, db resume is called multiple times. * This option decides how long to wait to retry the next resume if the * previous resume fails and satisfy redo resume conditions. - * + *

* Default: 1000000 (microseconds). * * @param bgerrorResumeRetryInterval how many microseconds to wait between DB resume attempts. @@ -1690,7 +1694,7 @@ T setEnableWriteThreadAdaptiveYield( * If max_bgerror_resume_count is ≥ 2, db resume is called multiple times. * This option decides how long to wait to retry the next resume if the * previous resume fails and satisfy redo resume conditions. - * + *

* Default: 1000000 (microseconds). * * @return the instance of the current object. diff --git a/java/src/main/java/org/rocksdb/DbPath.java b/java/src/main/java/org/rocksdb/DbPath.java index 3f0b67557c5e..3895b258556e 100644 --- a/java/src/main/java/org/rocksdb/DbPath.java +++ b/java/src/main/java/org/rocksdb/DbPath.java @@ -14,6 +14,12 @@ public class DbPath { final Path path; final long targetSize; + /** + * Constructs a DbPath. + * + * @param path the path. + * @param targetSize the target size. + */ public DbPath(final Path path, final long targetSize) { this.path = path; this.targetSize = targetSize; diff --git a/java/src/main/java/org/rocksdb/DirectSlice.java b/java/src/main/java/org/rocksdb/DirectSlice.java index 88ec29e3bd65..83f7a2acc5e0 100644 --- a/java/src/main/java/org/rocksdb/DirectSlice.java +++ b/java/src/main/java/org/rocksdb/DirectSlice.java @@ -16,6 +16,10 @@ * values consider using @see org.rocksdb.Slice */ public class DirectSlice extends AbstractSlice { + + /** + * Constant for No Direct Slice. + */ public static final DirectSlice NONE = new DirectSlice(); /** @@ -110,6 +114,11 @@ public void removePrefix(final int n) { this.internalBufferOffset += n; } + /** + * Set the length of the direct slice. + * + * @param n the length. + */ public void setLength(final int n) { setLength0(getNativeHandle(), n); } diff --git a/java/src/main/java/org/rocksdb/Experimental.java b/java/src/main/java/org/rocksdb/Experimental.java index 64b404d6f195..13ac5a0e3b19 100644 --- a/java/src/main/java/org/rocksdb/Experimental.java +++ b/java/src/main/java/org/rocksdb/Experimental.java @@ -19,5 +19,10 @@ @Retention(RetentionPolicy.SOURCE) @Target({ElementType.TYPE, ElementType.METHOD}) public @interface Experimental { + /** + * A description explaining why the feature is experimental. + * + * @return the explanation of why the feature is experimental. 
+ */ String value(); } diff --git a/java/src/main/java/org/rocksdb/ExternalFileIngestionInfo.java b/java/src/main/java/org/rocksdb/ExternalFileIngestionInfo.java index 7a99dd6bfe2f..4a348ab32389 100644 --- a/java/src/main/java/org/rocksdb/ExternalFileIngestionInfo.java +++ b/java/src/main/java/org/rocksdb/ExternalFileIngestionInfo.java @@ -7,6 +7,9 @@ import java.util.Objects; +/** + * Information about the ingestion of External Files. + */ public class ExternalFileIngestionInfo { private final String columnFamilyName; private final String externalFilePath; diff --git a/java/src/main/java/org/rocksdb/Filter.java b/java/src/main/java/org/rocksdb/Filter.java index 018807c0405a..94f3c4d6a82f 100644 --- a/java/src/main/java/org/rocksdb/Filter.java +++ b/java/src/main/java/org/rocksdb/Filter.java @@ -15,13 +15,18 @@ //TODO(AR) should be renamed FilterPolicy public abstract class Filter extends RocksObject { + /** + * Constructs a filter. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ filter object. + */ protected Filter(final long nativeHandle) { super(nativeHandle); } /** * Deletes underlying C++ filter pointer. - * + *

* Note that this function should be called only after all * RocksDB instances referencing the filter are closed. * Otherwise an undefined behavior will occur. diff --git a/java/src/main/java/org/rocksdb/FilterPolicyType.java b/java/src/main/java/org/rocksdb/FilterPolicyType.java index 6a693ee4039d..c7051ac07be6 100644 --- a/java/src/main/java/org/rocksdb/FilterPolicyType.java +++ b/java/src/main/java/org/rocksdb/FilterPolicyType.java @@ -9,6 +9,9 @@ * IndexType used in conjunction with BlockBasedTable. */ public enum FilterPolicyType { + /** + * Unknown filter policy. + */ kUnknownFilterPolicy((byte) 0), /** @@ -25,7 +28,7 @@ public enum FilterPolicyType { */ kRibbonFilterPolicy((byte) 2); - public Filter createFilter(final long handle, final double param) { + Filter createFilter(final long handle, final double param) { if (this == kBloomFilterPolicy) { return new BloomFilter(handle, param); } diff --git a/java/src/main/java/org/rocksdb/FlushJobInfo.java b/java/src/main/java/org/rocksdb/FlushJobInfo.java index 414d3a2f332e..52af3afe1795 100644 --- a/java/src/main/java/org/rocksdb/FlushJobInfo.java +++ b/java/src/main/java/org/rocksdb/FlushJobInfo.java @@ -7,6 +7,9 @@ import java.util.Objects; +/** + * Information about a flush job. + */ public class FlushJobInfo { private final long columnFamilyId; private final String columnFamilyName; diff --git a/java/src/main/java/org/rocksdb/FlushReason.java b/java/src/main/java/org/rocksdb/FlushReason.java index 21abbb352134..177e5aa3656c 100644 --- a/java/src/main/java/org/rocksdb/FlushReason.java +++ b/java/src/main/java/org/rocksdb/FlushReason.java @@ -5,21 +5,83 @@ package org.rocksdb; +/** + * Reasons for a flush. + */ public enum FlushReason { + /** + * Other. + */ OTHERS((byte) 0x00), + + /** + * Get live files. + */ GET_LIVE_FILES((byte) 0x01), + + /** + * Shutdown. + */ SHUTDOWN((byte) 0x02), + + /** + * External file ingestion. + */ EXTERNAL_FILE_INGESTION((byte) 0x03), + + /** + * Manual compaction. 
+ */ MANUAL_COMPACTION((byte) 0x04), + + /** + * Write buffer manager. + */ WRITE_BUFFER_MANAGER((byte) 0x05), + + /** + * Write buffer full. + */ WRITE_BUFFER_FULL((byte) 0x06), + + /** + * Test. + */ TEST((byte) 0x07), + + /** + * Delete file(s). + */ DELETE_FILES((byte) 0x08), + + /** + * Automatic compaction. + */ AUTO_COMPACTION((byte) 0x09), + + /** + * Manual flush. + */ MANUAL_FLUSH((byte) 0x0a), + + /** + * Error recovery. + */ ERROR_RECOVERY((byte) 0x0b), + + /** + * Error recovery retry flush. + */ ERROR_RECOVERY_RETRY_FLUSH((byte) 0x0c), + + /** + * Write Ahead Log full. + */ WAL_FULL((byte) 0x0d), + + /** + * Catch up after error recovery. + */ CATCH_UP_AFTER_ERROR_RECOVERY((byte) 0x0e); private final byte value; diff --git a/java/src/main/java/org/rocksdb/GetStatus.java b/java/src/main/java/org/rocksdb/GetStatus.java index a2afafe39ebd..a7ab4902f3f3 100644 --- a/java/src/main/java/org/rocksdb/GetStatus.java +++ b/java/src/main/java/org/rocksdb/GetStatus.java @@ -12,7 +12,15 @@ * If the target of the fetch is not big enough, this may be bigger than the contents of the target. */ public class GetStatus { + + /** + * The status of the request to fetch into the buffer. + */ public final Status status; + + /** + * The size of the data, which may be bigger than the buffer. + */ public final int requiredSize; /** diff --git a/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java b/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java index cc18b61d2260..4edfd3a0d7fa 100644 --- a/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java +++ b/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java @@ -15,22 +15,42 @@ * and post a warning in the LOG. */ public class HashLinkedListMemTableConfig extends MemTableConfig { + + /** + * The default number of buckets. + */ public static final long DEFAULT_BUCKET_COUNT = 50_000; + + /** + * The default size of huge TLB pages. 
+ */ public static final long DEFAULT_HUGE_PAGE_TLB_SIZE = 0; + + /** + * The default logging threshold for the number of entries in a bucket. + */ public static final int DEFAULT_BUCKET_ENTRIES_LOG_THRES = 4096; + + /** + * The default for whether to log the bucket distribution when flushing. + */ public static final boolean DEFAULT_IF_LOG_BUCKET_DIST_WHEN_FLUSH = true; - public static final int DEFAUL_THRESHOLD_USE_SKIPLIST = 256; /** - * HashLinkedListMemTableConfig constructor + * The default threshold for determining when to use a Skip List. + */ + public static final int DEFAULT_THRESHOLD_USE_SKIPLIST = 256; + + /** + * Constructs a HashLinkedListMemTableConfig. */ public HashLinkedListMemTableConfig() { bucketCount_ = DEFAULT_BUCKET_COUNT; hugePageTlbSize_ = DEFAULT_HUGE_PAGE_TLB_SIZE; bucketEntriesLoggingThreshold_ = DEFAULT_BUCKET_ENTRIES_LOG_THRES; ifLogBucketDistWhenFlush_ = DEFAULT_IF_LOG_BUCKET_DIST_WHEN_FLUSH; - thresholdUseSkiplist_ = DEFAUL_THRESHOLD_USE_SKIPLIST; + thresholdUseSkiplist_ = DEFAULT_THRESHOLD_USE_SKIPLIST; } /** diff --git a/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java b/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java index 33991f90f729..187866213c4c 100644 --- a/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java +++ b/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java @@ -15,12 +15,24 @@ * and post a warning in the LOG. */ public class HashSkipListMemTableConfig extends MemTableConfig { + + /** + * The default number of buckets. + */ public static final int DEFAULT_BUCKET_COUNT = 1_000_000; + + /** + * The default branching factor. + */ public static final int DEFAULT_BRANCHING_FACTOR = 4; + + /** + * The default skip list height. + */ public static final int DEFAULT_HEIGHT = 4; /** - * HashSkipListMemTableConfig constructor + * Constructs a HashSkipListMemTableConfig. 
*/ public HashSkipListMemTableConfig() { bucketCount_ = DEFAULT_BUCKET_COUNT; diff --git a/java/src/main/java/org/rocksdb/HistogramData.java b/java/src/main/java/org/rocksdb/HistogramData.java index 81d890883487..1fdd0c26e9a7 100644 --- a/java/src/main/java/org/rocksdb/HistogramData.java +++ b/java/src/main/java/org/rocksdb/HistogramData.java @@ -5,6 +5,9 @@ package org.rocksdb; +/** + * Histogram Data. + */ public class HistogramData { private final double median_; private final double percentile95_; @@ -16,12 +19,34 @@ public class HistogramData { private final long sum_; private final double min_; + /** + * Constructs a HistogramData. + * + * @param median the median value. + * @param percentile95 the 95th percentile value. + * @param percentile99 the 99th percentile value. + * @param average the average value. + * @param standardDeviation the value of the standard deviation. + */ public HistogramData(final double median, final double percentile95, final double percentile99, final double average, final double standardDeviation) { this(median, percentile95, percentile99, average, standardDeviation, 0.0, 0, 0, 0.0); } + /** + * Constructs a HistogramData. + * + * @param median the median value. + * @param percentile95 the 95th percentile value. + * @param percentile99 the 99th percentile value. + * @param average the average value. + * @param standardDeviation the value of the standard deviation. + * @param max the maximum value. + * @param count the number of values. + * @param sum the sum of the values. + * @param min the minimum value. + */ public HistogramData(final double median, final double percentile95, final double percentile99, final double average, final double standardDeviation, final double max, final long count, @@ -37,38 +62,83 @@ public HistogramData(final double median, final double percentile95, sum_ = sum; } + /** + * Get the median value. + * + * @return the median value. 
+ */ public double getMedian() { return median_; } + /** + * Get the 95th percentile value. + * + * @return the 95th percentile value. + */ public double getPercentile95() { return percentile95_; } + /** + * Get the 99th percentile value. + * + * @return the 99th percentile value. + */ public double getPercentile99() { return percentile99_; } + /** + * Get the average value. + * + * @return the average value. + */ public double getAverage() { return average_; } + /** + * Get the value of the standard deviation. + * + * @return the value of the standard deviation. + */ public double getStandardDeviation() { return standardDeviation_; } + /** + * Get the maximum value. + * + * @return the maximum value. + */ public double getMax() { return max_; } + /** + * Get the number of values. + * + * @return the number of values. + */ public long getCount() { return count_; } + /** + * Get the sum of the values. + * + * @return the sum of the values. + */ public long getSum() { return sum_; } + /** + * Get the minimum value. + * + * @return the minimum value. + */ public double getMin() { return min_; } diff --git a/java/src/main/java/org/rocksdb/HistogramType.java b/java/src/main/java/org/rocksdb/HistogramType.java index b4a56cc07e0d..d503e75154b4 100644 --- a/java/src/main/java/org/rocksdb/HistogramType.java +++ b/java/src/main/java/org/rocksdb/HistogramType.java @@ -5,69 +5,180 @@ package org.rocksdb; +/** + * The types of histogram. + */ public enum HistogramType { + /** + * DB Get. + */ DB_GET((byte) 0x0), + /** + * DB Write. + */ DB_WRITE((byte) 0x1), + /** + * Time spent in compaction. + */ COMPACTION_TIME((byte) 0x2), + /** + * CPU time spent in compaction. + */ COMPACTION_CPU_TIME((byte) 0x3), + /** + * Time spent in setting up sub-compaction. + */ SUBCOMPACTION_SETUP_TIME((byte) 0x4), + /** + * Time spent in IO during table sync. + * Measured in microseconds. + */ TABLE_SYNC_MICROS((byte) 0x5), + /** + * Time spent in IO during compaction of outfile. 
+ * Measured in microseconds. + */ COMPACTION_OUTFILE_SYNC_MICROS((byte) 0x6), + /** + * Time spent in IO during WAL file sync. + * Measured in microseconds. + */ WAL_FILE_SYNC_MICROS((byte) 0x7), + /** + * Time spent in IO during manifest file sync. + * Measured in microseconds. + */ MANIFEST_FILE_SYNC_MICROS((byte) 0x8), /** - * TIME SPENT IN IO DURING TABLE OPEN. + * Time spent in IO during table open. + * Measured in microseconds. */ TABLE_OPEN_IO_MICROS((byte) 0x9), + /** + * DB Multi-Get. + */ DB_MULTIGET((byte) 0xA), + /** + * Time spent in block reads during compaction. + * Measured in microseconds. + */ READ_BLOCK_COMPACTION_MICROS((byte) 0xB), + /** + * Time spent in block reads. + * Measured in microseconds. + */ READ_BLOCK_GET_MICROS((byte) 0xC), + /** + * Time spent in raw block writes. + * Measured in microseconds. + */ WRITE_RAW_BLOCK_MICROS((byte) 0xD), + /** + * Number of files in a single compaction. + */ NUM_FILES_IN_SINGLE_COMPACTION((byte) 0xE), + /** + * DB Seek. + */ DB_SEEK((byte) 0xF), + /** + * Write stall. + */ WRITE_STALL((byte) 0x10), + /** + * Time spent in SST reads. + * Measured in microseconds. + */ SST_READ_MICROS((byte) 0x11), + /** + * File read during flush. + * Measured in microseconds. + */ FILE_READ_FLUSH_MICROS((byte) 0x12), + /** + * File read during compaction. + * Measured in microseconds. + */ FILE_READ_COMPACTION_MICROS((byte) 0x13), + /** + * File read during DB Open. + * Measured in microseconds. + */ FILE_READ_DB_OPEN_MICROS((byte) 0x14), + /** + * File read during DB Get. + * Measured in microseconds. + */ FILE_READ_GET_MICROS((byte) 0x15), + /** + * File read during DB Multi-Get. + * Measured in microseconds. + */ FILE_READ_MULTIGET_MICROS((byte) 0x16), + /** + * File read during DB Iterator. + * Measured in microseconds. + */ FILE_READ_DB_ITERATOR_MICROS((byte) 0x17), + /** + * File read during DB checksum validation. + * Measured in microseconds. 
+ */ FILE_READ_VERIFY_DB_CHECKSUM_MICROS((byte) 0x18), + /** + * File read during file checksum validation. + * Measured in microseconds. + */ FILE_READ_VERIFY_FILE_CHECKSUMS_MICROS((byte) 0x19), + /** + * Time spent writing SST files. + * Measured in microseconds. + */ SST_WRITE_MICROS((byte) 0x1A), + /** + * Time spent in writing SST table (currently only block-based table) or blob file for flush. + * Measured in microseconds. + */ FILE_WRITE_FLUSH_MICROS((byte) 0x1B), + /** + * Time spent in writing SST table (currently only block-based table) for compaction. + * Measured in microseconds. + */ FILE_WRITE_COMPACTION_MICROS((byte) 0x1C), + /** + * Time spent in writing SST table (currently only block-based table) or blob file for db open. + * Measured in microseconds. + */ FILE_WRITE_DB_OPEN_MICROS((byte) 0x1D), /** @@ -79,13 +190,34 @@ public enum HistogramType { * Value size distribution in each operation. */ BYTES_PER_READ((byte) 0x1F), + + /** + * Bytes per write. + * Value size distribution in each operation. + */ BYTES_PER_WRITE((byte) 0x20), + + /** + * Bytes per Multi-Get. + * Value size distribution in each operation. + */ BYTES_PER_MULTIGET((byte) 0x21), + /** + * Time spent in compression. + * Measured in nanoseconds. + */ COMPRESSION_TIMES_NANOS((byte) 0x22), + /** + * Time spent in decompression. + * Measured in nanoseconds. + */ DECOMPRESSION_TIMES_NANOS((byte) 0x23), + /** + * Number of merge operands for read. + */ READ_NUM_MERGE_OPERANDS((byte) 0x24), /** @@ -100,56 +232,67 @@ public enum HistogramType { /** * BlobDB Put/PutWithTTL/PutUntil/Write latency. + * Measured in microseconds. */ BLOB_DB_WRITE_MICROS((byte) 0x27), /** * BlobDB Get lagency. + * Measured in microseconds. */ BLOB_DB_GET_MICROS((byte) 0x28), /** * BlobDB MultiGet latency. + * Measured in microseconds. */ BLOB_DB_MULTIGET_MICROS((byte) 0x29), /** * BlobDB Seek/SeekToFirst/SeekToLast/SeekForPrev latency. + * Measured in microseconds. 
*/ BLOB_DB_SEEK_MICROS((byte) 0x2A), /** * BlobDB Next latency. + * Measured in microseconds. */ BLOB_DB_NEXT_MICROS((byte) 0x2B), /** * BlobDB Prev latency. + * Measured in microseconds. */ BLOB_DB_PREV_MICROS((byte) 0x2C), /** * Blob file write latency. + * Measured in microseconds. */ BLOB_DB_BLOB_FILE_WRITE_MICROS((byte) 0x2D), /** * Blob file read latency. + * Measured in microseconds. */ BLOB_DB_BLOB_FILE_READ_MICROS((byte) 0x2E), /** * Blob file sync latency. + * Measured in microseconds. */ BLOB_DB_BLOB_FILE_SYNC_MICROS((byte) 0x2F), /** * BlobDB compression time. + * Measured in microseconds. */ BLOB_DB_COMPRESSION_MICROS((byte) 0x30), /** * BlobDB decompression time. + * Measured in microseconds. */ BLOB_DB_DECOMPRESSION_MICROS((byte) 0x31), @@ -159,18 +302,17 @@ public enum HistogramType { FLUSH_TIME((byte) 0x32), /** - * Number of MultiGet batch keys overlapping a file + * Number of MultiGet batch keys overlapping a file. */ SST_BATCH_SIZE((byte) 0x33), /** - * Size of a single IO batch issued by MultiGet + * Size of a single IO batch issued by MultiGet. */ MULTIGET_IO_BATCH_SIZE((byte) 0x34), /** - * Num of Index and Filter blocks read from file system per level in MultiGet - * request + * Num of Index and Filter blocks read from file system per level in MultiGet request. */ NUM_INDEX_AND_FILTER_BLOCKS_READ_PER_LEVEL((byte) 0x35), @@ -185,12 +327,19 @@ public enum HistogramType { NUM_LEVEL_READ_PER_MULTIGET((byte) 0x37), /** - * The number of retry in auto resume + * The number of retry in auto resume. */ ERROR_HANDLER_AUTORESUME_RETRY_COUNT((byte) 0x38), + /** + * Bytes read asynchronously. + */ ASYNC_READ_BYTES((byte) 0x39), + /** + * Wait time for polling. + * Measured in microseconds. 
+ */ POLL_WAIT_MICROS((byte) 0x3A), /** @@ -199,17 +348,19 @@ public enum HistogramType { PREFETCHED_BYTES_DISCARDED((byte) 0x3B), /** - * Wait time for aborting async read in FilePrefetchBuffer destructor + * Wait time for aborting async read in FilePrefetchBuffer destructor. + * Measured in microseconds. */ ASYNC_PREFETCH_ABORT_MICROS((byte) 0x3C), /** - * Number of bytes read for RocksDB's prefetching contents - * (as opposed to file system's prefetch) - * from the end of SST table during block based table open + * Number of bytes read for RocksDB's prefetching contents (as opposed to file system's prefetch) from the end of SST table during block based table open. */ TABLE_OPEN_PREFETCH_TAIL_READ_BYTES((byte) 0x3D), + /** + * Bytes prefetched during compaction. + */ COMPACTION_PREFETCH_BYTES((byte) 0x3F), /** @@ -217,12 +368,13 @@ public enum HistogramType { */ /** - * Time spent in Iterator::Prepare() for multi-scan (microseconds) + * Time spent in Iterator::Prepare() for multi-scan (microseconds). + * Measured in microseconds. */ MULTISCAN_PREPARE_MICROS((byte) 0x40), /** - * Number of blocks per multi-scan Prepare() call + * Number of blocks per multi-scan Prepare() call. */ MULTISCAN_BLOCKS_PER_PREPARE((byte) 0x41), diff --git a/java/src/main/java/org/rocksdb/Holder.java b/java/src/main/java/org/rocksdb/Holder.java index 716a0bda0736..dd088dcd767e 100644 --- a/java/src/main/java/org/rocksdb/Holder.java +++ b/java/src/main/java/org/rocksdb/Holder.java @@ -7,6 +7,8 @@ /** * Simple instance reference wrapper. + * + * @param the concrete type that this holder holds. 
*/ public class Holder { private /* @Nullable */ T value; diff --git a/java/src/main/java/org/rocksdb/ImportColumnFamilyOptions.java b/java/src/main/java/org/rocksdb/ImportColumnFamilyOptions.java index 652bd19dc8c1..0c5dea7de6ba 100644 --- a/java/src/main/java/org/rocksdb/ImportColumnFamilyOptions.java +++ b/java/src/main/java/org/rocksdb/ImportColumnFamilyOptions.java @@ -12,6 +12,10 @@ * ExportImportFilesMetaData)}. */ public class ImportColumnFamilyOptions extends RocksObject { + + /** + * Constructs an ImportColumnFamilyOptions. + */ public ImportColumnFamilyOptions() { super(newImportColumnFamilyOptions()); } diff --git a/java/src/main/java/org/rocksdb/IndexShorteningMode.java b/java/src/main/java/org/rocksdb/IndexShorteningMode.java index a68346c3823c..2d4b3f9ad629 100644 --- a/java/src/main/java/org/rocksdb/IndexShorteningMode.java +++ b/java/src/main/java/org/rocksdb/IndexShorteningMode.java @@ -11,7 +11,7 @@ * enabled ({@link DBOptions#useDirectReads()} == true). * The default mode is the best tradeoff for most use cases. * This option only affects newly written tables. - * + *
<p>
* The index contains a key separating each pair of consecutive blocks. * Let A be the highest key in one block, B the lowest key in the next block, * and I the index entry separating these two blocks: @@ -22,7 +22,7 @@ * However, if I=A, this can't happen, and we'll read only the second block. * In kNoShortening mode, we use I=A. In other modes, we use the shortest * key in [A, B), which usually significantly reduces index size. - * + *
<p>
* There's a similar story for the last index entry, which is an upper bound * of the highest key in the file. If it's shortened and therefore * overestimated, iterator is likely to unnecessarily read the last data block diff --git a/java/src/main/java/org/rocksdb/InfoLogLevel.java b/java/src/main/java/org/rocksdb/InfoLogLevel.java index 197bd89dab68..c5fda9acd7c3 100644 --- a/java/src/main/java/org/rocksdb/InfoLogLevel.java +++ b/java/src/main/java/org/rocksdb/InfoLogLevel.java @@ -5,12 +5,39 @@ * RocksDB log levels. */ public enum InfoLogLevel { + /** + * Log 'debug' level events. + */ DEBUG_LEVEL((byte)0), + + /** + * Log 'info' level events. + */ INFO_LEVEL((byte)1), + + /** + * Log 'warn' level events. + */ WARN_LEVEL((byte)2), + + /** + * Log 'error' level events. + */ ERROR_LEVEL((byte)3), + + /** + * Log 'fatal' level events. + */ FATAL_LEVEL((byte)4), + + /** + * Log 'header' level events. + */ HEADER_LEVEL((byte)5), + + /** + * The number of log levels available. + */ NUM_INFO_LOG_LEVELS((byte)6); private final byte value_; diff --git a/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java b/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java index aed28131a17e..040313c59fc2 100644 --- a/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java +++ b/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java @@ -12,11 +12,16 @@ */ public class IngestExternalFileOptions extends RocksObject { + /** + * Constructs an IngestExternalFileOptions. + */ public IngestExternalFileOptions() { super(newIngestExternalFileOptions()); } /** + * Constructs an IngestExternalFileOptions. 
+ * * @param moveFiles {@link #setMoveFiles(boolean)} * @param snapshotConsistency {@link #setSnapshotConsistency(boolean)} * @param allowGlobalSeqNo {@link #setAllowGlobalSeqNo(boolean)} diff --git a/java/src/main/java/org/rocksdb/KeyMayExist.java b/java/src/main/java/org/rocksdb/KeyMayExist.java index cd2267528d85..98d176f6d4fd 100644 --- a/java/src/main/java/org/rocksdb/KeyMayExist.java +++ b/java/src/main/java/org/rocksdb/KeyMayExist.java @@ -5,8 +5,12 @@ package org.rocksdb; +import java.nio.ByteBuffer; import java.util.Objects; +/** + * Indicates whether a key exists or not, and its corresponding value's length. + */ public class KeyMayExist { @Override public boolean equals(final Object o) { @@ -23,13 +27,44 @@ public int hashCode() { return Objects.hash(exists, valueLength); } - public enum KeyMayExistEnum { kNotExist, kExistsWithoutValue, kExistsWithValue } + /** + * Part of the return type from {@link RocksDB#keyMayExist(ColumnFamilyHandle, ByteBuffer, ByteBuffer)}. + */ + public enum KeyMayExistEnum { + /** + * Key does not exist. + */ + kNotExist, + /** + * Key may exist without a value. + */ + kExistsWithoutValue, + + /** + * Key may exist with a value. + */ + kExistsWithValue + } + + /** + * Constructs a KeyMayExist. + * + * @param exists indicates if the key exists. + * @param valueLength the length of the value pointed to by the key (if it exists). + */ KeyMayExist(final KeyMayExistEnum exists, final int valueLength) { this.exists = exists; this.valueLength = valueLength; } + /** + * Indicates if the key exists. + */ public final KeyMayExistEnum exists; + + /** + * The length of the value pointed to by the key (if it exists). 
+ */ public final int valueLength; } diff --git a/java/src/main/java/org/rocksdb/LiveFileMetaData.java b/java/src/main/java/org/rocksdb/LiveFileMetaData.java index 5242496a315b..9cac783eeb20 100644 --- a/java/src/main/java/org/rocksdb/LiveFileMetaData.java +++ b/java/src/main/java/org/rocksdb/LiveFileMetaData.java @@ -46,7 +46,7 @@ public int level() { return level; } - public long newLiveFileMetaDataHandle() { + private long newLiveFileMetaDataHandle() { return newLiveFileMetaDataHandle(columnFamilyName(), columnFamilyName().length, level(), fileName(), path(), size(), smallestSeqno(), largestSeqno(), smallestKey(), smallestKey().length, largestKey(), largestKey().length, numReadsSampled(), diff --git a/java/src/main/java/org/rocksdb/LogFile.java b/java/src/main/java/org/rocksdb/LogFile.java index 5ee2c9fcc64a..2be597ce5f9b 100644 --- a/java/src/main/java/org/rocksdb/LogFile.java +++ b/java/src/main/java/org/rocksdb/LogFile.java @@ -5,6 +5,9 @@ package org.rocksdb; +/** + * A (journal) log file. + */ @SuppressWarnings("PMD.MissingStaticMethodInNonInstantiatableClass") public class LogFile { private final String pathName; diff --git a/java/src/main/java/org/rocksdb/Logger.java b/java/src/main/java/org/rocksdb/Logger.java index b8d0e45efa09..e967bea33fa3 100644 --- a/java/src/main/java/org/rocksdb/Logger.java +++ b/java/src/main/java/org/rocksdb/Logger.java @@ -99,20 +99,45 @@ public InfoLogLevel infoLogLevel() { infoLogLevel(nativeHandle_)); } - @Override - public long getNativeHandle() { - return nativeHandle_; - } - @Override public final LoggerType getLoggerType() { return LoggerType.JAVA_IMPLEMENTATION; } - protected abstract void log(final InfoLogLevel logLevel, final String logMsg); + /** + * Log a message. + * + * @param logLevel the log level. + * @param logMsg the log message. + */ + protected abstract void log(final InfoLogLevel logLevel, + final String logMsg); + /** + * Create a new Logger with Options. + * + * @param logLevel the log level. 
+ * + * @return the native handle to the underlying C++ native Logger object. + */ protected native long newLogger(final long logLevel); - protected native void setInfoLogLevel(final long handle, final byte logLevel); + + /** + * Set the log level. + * + * @param handle the native handle to the underlying C++ native Logger object. + * @param logLevel the log level. + */ + protected native void setInfoLogLevel(final long handle, + final byte logLevel); + + /** + * Get the log level. + * + * @param handle the native handle to the underlying C++ native Logger object. + * + * @return the log level. + */ protected native byte infoLogLevel(final long handle); /** diff --git a/java/src/main/java/org/rocksdb/MemTableInfo.java b/java/src/main/java/org/rocksdb/MemTableInfo.java index 3d429035a343..56396ac8d997 100644 --- a/java/src/main/java/org/rocksdb/MemTableInfo.java +++ b/java/src/main/java/org/rocksdb/MemTableInfo.java @@ -7,6 +7,9 @@ import java.util.Objects; +/** + * Information about a Mem Table. + */ public class MemTableInfo { private final String columnFamilyName; private final long firstSeqno; diff --git a/java/src/main/java/org/rocksdb/MergeOperator.java b/java/src/main/java/org/rocksdb/MergeOperator.java index c299f62210fa..630c400cfa9a 100644 --- a/java/src/main/java/org/rocksdb/MergeOperator.java +++ b/java/src/main/java/org/rocksdb/MergeOperator.java @@ -12,6 +12,12 @@ * value. */ public abstract class MergeOperator extends RocksObject { + + /** + * Constructs a MergeOperator. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ MergeOperator. 
+ */ protected MergeOperator(final long nativeHandle) { super(nativeHandle); } diff --git a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java index e54db7171e54..b58098119e9e 100644 --- a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java +++ b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java @@ -7,6 +7,9 @@ import java.util.*; +/** + * Mutable Column Family Options. + */ public class MutableColumnFamilyOptions extends AbstractMutableOptions { /** * User must use builder pattern, or parser. @@ -54,24 +57,87 @@ public static MutableColumnFamilyOptionsBuilder parse( return new MutableColumnFamilyOptionsBuilder().fromParsed(parsedOptions, ignoreUnknown); } + /** + * Parses a String representation of MutableColumnFamilyOptions + *
<p>
+ * The format is: key1=value1;key2=value2;key3=value3 etc + *
<p>
+ * For int[] values, each int should be separated by a colon, e.g. + *
<p>
+ * key1=value1;intArrayKey1=1:2:3 + * + * @param str The string representation of the mutable column family options + * + * @return A builder for the mutable column family options + */ public static MutableColumnFamilyOptionsBuilder parse(final String str) { return parse(str, false); } private interface MutableColumnFamilyOptionKey extends MutableOptionKey {} + /** + * Mem Table options. + */ public enum MemtableOption implements MutableColumnFamilyOptionKey { + /** + * Write buffer size. + */ write_buffer_size(ValueType.LONG), + + /** + * Arena block size. + */ arena_block_size(ValueType.LONG), + + /** + * Prefix size ratio for Memtable's Bloom Filter. + */ memtable_prefix_bloom_size_ratio(ValueType.DOUBLE), + + /** + * Whether to filter whole keys in the Memtable(s). + */ memtable_whole_key_filtering(ValueType.BOOLEAN), + + /** + * Number of bits for the prefix in Memtable's Bloom Filter. + */ @Deprecated memtable_prefix_bloom_bits(ValueType.INT), + + /** + * Number of probes for the prefix in Memtable's Bloom Filter. + */ @Deprecated memtable_prefix_bloom_probes(ValueType.INT), + + /** + * Huge Page Size for Memtable(s). + */ memtable_huge_page_size(ValueType.LONG), + + /** + * Maximum number of successive merges. + */ max_successive_merges(ValueType.LONG), + + /** + * Whether to filter deletes. + */ @Deprecated filter_deletes(ValueType.BOOLEAN), + + /** + * Maximum number of write buffers. + */ max_write_buffer_number(ValueType.INT), + + /** + * Number of in-place update locks. + */ inplace_update_num_locks(ValueType.LONG), + + /** + * Memory purge threshold. + */ experimental_mempurge_threshold(ValueType.DOUBLE); private final ValueType valueType; @@ -85,20 +151,78 @@ public ValueType getValueType() { } } + /** + * Compaction options. + */ public enum CompactionOption implements MutableColumnFamilyOptionKey { + /** + * Disable auto compaction. 
+ */ disable_auto_compactions(ValueType.BOOLEAN), + + /** + * Soft limit on the number of bytes pending before compaction. + */ soft_pending_compaction_bytes_limit(ValueType.LONG), + + /** + * Hard limit on the number of bytes pending before compaction. + */ hard_pending_compaction_bytes_limit(ValueType.LONG), + + /** + * Number of files in Level 0 before compaction is triggered. + */ level0_file_num_compaction_trigger(ValueType.INT), + + /** + * Writes to Level 0 before a slowdown is triggered. + */ level0_slowdown_writes_trigger(ValueType.INT), + + /** + * Writes to Level 0 before a stop is triggered. + */ level0_stop_writes_trigger(ValueType.INT), + + /** + * Max compaction bytes. + */ max_compaction_bytes(ValueType.LONG), + + /** + * Target for the base size of files. + */ target_file_size_base(ValueType.LONG), + + /** + * Multiplier for the size of files. + */ target_file_size_multiplier(ValueType.INT), + + /** + * Maximum size in bytes for level base. + */ max_bytes_for_level_base(ValueType.LONG), + + /** + * Maximum bytes for level multiplier. + */ max_bytes_for_level_multiplier(ValueType.INT), + + /** + * Maximum bytes for level multiplier(s) additional + */ max_bytes_for_level_multiplier_additional(ValueType.INT_ARRAY), + + /** + * Time-to-live. + */ ttl(ValueType.LONG), + + /** + * Compaction period in seconds. + */ periodic_compaction_seconds(ValueType.LONG); private final ValueType valueType; @@ -112,16 +236,58 @@ public ValueType getValueType() { } } + /** + * Blob options. + */ public enum BlobOption implements MutableColumnFamilyOptionKey { + /** + * Enable BLOB files. + */ enable_blob_files(ValueType.BOOLEAN), + + /** + * Minimum BLOB size. + */ min_blob_size(ValueType.LONG), + + /** + * BLOB file size. + */ blob_file_size(ValueType.LONG), + + /** + * BLOB compression type. + */ blob_compression_type(ValueType.ENUM), + + /** + * Enable BLOB garbage collection. 
+ */ enable_blob_garbage_collection(ValueType.BOOLEAN), + + /** + * BLOB garbage collection age cut-off. + */ blob_garbage_collection_age_cutoff(ValueType.DOUBLE), + + /** + * Threshold for forcing BLOB garbage collection. + */ blob_garbage_collection_force_threshold(ValueType.DOUBLE), + + /** + * BLOB compaction read-ahead size. + */ blob_compaction_readahead_size(ValueType.LONG), + + /** + * BLOB file starting level. + */ blob_file_starting_level(ValueType.INT), + + /** + * Prepopulate BLOB Cache. + */ prepopulate_blob_cache(ValueType.ENUM); private final ValueType valueType; @@ -135,10 +301,28 @@ public ValueType getValueType() { } } + /** + * Miscellaneous options. + */ public enum MiscOption implements MutableColumnFamilyOptionKey { + /** + * Maximum number of sequential keys to skip during iteration. + */ max_sequential_skip_in_iterations(ValueType.LONG), + + /** + * Whether to enable paranoid file checks. + */ paranoid_file_checks(ValueType.BOOLEAN), + + /** + * Whether to report background I/O stats. + */ report_bg_io_stats(ValueType.BOOLEAN), + + /** + * Compression type. + */ compression(ValueType.ENUM); private final ValueType valueType; @@ -152,6 +336,9 @@ public ValueType getValueType() { } } + /** + * Builder for constructing MutableColumnFamilyOptions. + */ public static class MutableColumnFamilyOptionsBuilder extends AbstractMutableOptionsBuilder implements MutableColumnFamilyOptionsInterface { diff --git a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java index 729b0e882788..c637989d82fa 100644 --- a/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java @@ -5,6 +5,11 @@ package org.rocksdb; +/** + * Interface for MutableColumnFamilyOptions. + * + * @param the concrete type of the MutableColumnFamilyOptions. 
+ */ public interface MutableColumnFamilyOptionsInterface< T extends MutableColumnFamilyOptionsInterface> extends AdvancedMutableColumnFamilyOptionsInterface { diff --git a/java/src/main/java/org/rocksdb/MutableDBOptions.java b/java/src/main/java/org/rocksdb/MutableDBOptions.java index 894154e0df45..2913e6c71d00 100644 --- a/java/src/main/java/org/rocksdb/MutableDBOptions.java +++ b/java/src/main/java/org/rocksdb/MutableDBOptions.java @@ -10,6 +10,9 @@ import java.util.Map; import java.util.Objects; +/** + * Mutable Database Options. + */ public class MutableDBOptions extends AbstractMutableOptions { /** * User must use builder pattern, or parser. @@ -55,29 +58,110 @@ public static MutableDBOptionsBuilder parse(final String str, final boolean igno return new MutableDBOptions.MutableDBOptionsBuilder().fromParsed(parsedOptions, ignoreUnknown); } + /** + * Parses a String representation of MutableDBOptions + *
<p>
+ * The format is: key1=value1;key2=value2;key3=value3 etc + *
<p>
+ * For int[] values, each int should be separated by a colon, e.g. + *
<p>
+ * key1=value1;intArrayKey1=1:2:3 + * + * @param str The string representation of the mutable db options + * + * @return A builder for the mutable db options + */ public static MutableDBOptionsBuilder parse(final String str) { return parse(str, false); } private interface MutableDBOptionKey extends MutableOptionKey {} + /** + * Database options. + */ public enum DBOption implements MutableDBOptionKey { + /** + * Maximum number of background jobs. + */ max_background_jobs(ValueType.INT), + + /** + * Maximum number of background compactions. + */ max_background_compactions(ValueType.INT), + + /** + * Whether to avoid flush during shutdown. + */ avoid_flush_during_shutdown(ValueType.BOOLEAN), + + /** + * Max buffer size for writing to files. + */ writable_file_max_buffer_size(ValueType.LONG), + + /** + * Delayed write rate. + */ delayed_write_rate(ValueType.LONG), + + /** + * Maximum total size of the WAL. + */ max_total_wal_size(ValueType.LONG), + + /** + * The period to delete obsolete file. + * Measured in microseconds. + */ delete_obsolete_files_period_micros(ValueType.LONG), + + /** + * The period to dump statistics. + * Measured in seconds. + */ stats_dump_period_sec(ValueType.INT), + + /** + * The period that statistics persist. + * Measured in seconds. + */ stats_persist_period_sec(ValueType.INT), + + /** + * Buffer size for statistics history. + */ stats_history_buffer_size(ValueType.LONG), + + /** + * Maximum number of open files. + */ max_open_files(ValueType.INT), + + /** + * Bytes per sync. + */ bytes_per_sync(ValueType.LONG), + + /** + * WAL bytes per sync. + */ wal_bytes_per_sync(ValueType.LONG), + + /** + * Strict limit of bytes per sync. + */ strict_bytes_per_sync(ValueType.BOOLEAN), + + /** + * Compaction readahead size. + */ compaction_readahead_size(ValueType.LONG), + /** + * Signifies periods characterized by significantly less read and write activity compared to other times. 
+ */ daily_offpeak_time_utc(ValueType.STRING); private final ValueType valueType; @@ -91,6 +175,9 @@ public ValueType getValueType() { } } + /** + * Builder for constructing MutableDBOptions. + */ public static class MutableDBOptionsBuilder extends AbstractMutableOptionsBuilder implements MutableDBOptionsInterface { diff --git a/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java b/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java index 37c654454af3..304b1cc4cf2e 100644 --- a/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java @@ -1,6 +1,11 @@ // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. package org.rocksdb; +/** + * Interface for MutableDBOptions. + * + * @param the concrete type of DBOptions. + */ public interface MutableDBOptionsInterface> { /** * Specifies the maximum number of concurrent background jobs (both flushes diff --git a/java/src/main/java/org/rocksdb/MutableOptionKey.java b/java/src/main/java/org/rocksdb/MutableOptionKey.java index 3fb2d8b498c2..2a0b46823bd9 100644 --- a/java/src/main/java/org/rocksdb/MutableOptionKey.java +++ b/java/src/main/java/org/rocksdb/MutableOptionKey.java @@ -1,18 +1,63 @@ // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. package org.rocksdb; +/** + * Mutable Option keys. + */ public interface MutableOptionKey { + + /** + * Types of values used for Mutable Options, + */ enum ValueType { + + /** + * Double precision floating point number. + */ DOUBLE, + + /** + * 64 bit signed integer. + */ LONG, + + /** + * 32 bit signed integer. + */ INT, + + /** + * Boolean. + */ BOOLEAN, + + /** + * Array of 32 bit signed integers. + */ INT_ARRAY, + + /** + * Enumeration. + */ ENUM, - STRING, + /** + * String. + */ + STRING, } + /** + * Get the name of the MutableOption key. + * + * @return the name of the key. + */ String name(); + + /** + * Get the value type of the MutableOption. 
+ * + * @return the value type. + */ ValueType getValueType(); } diff --git a/java/src/main/java/org/rocksdb/MutableOptionValue.java b/java/src/main/java/org/rocksdb/MutableOptionValue.java index fe689b5d01b0..bded79e8d759 100644 --- a/java/src/main/java/org/rocksdb/MutableOptionValue.java +++ b/java/src/main/java/org/rocksdb/MutableOptionValue.java @@ -3,6 +3,11 @@ import static org.rocksdb.AbstractMutableOptions.INT_ARRAY_INT_SEPARATOR; +/** + * Base class for the value of a mutable option. + * + * @param the concrete type of the value. + */ public abstract class MutableOptionValue { abstract double asDouble() throws NumberFormatException; diff --git a/java/src/main/java/org/rocksdb/OperationStage.java b/java/src/main/java/org/rocksdb/OperationStage.java index 6ac0a15a2442..2ded8d8a642d 100644 --- a/java/src/main/java/org/rocksdb/OperationStage.java +++ b/java/src/main/java/org/rocksdb/OperationStage.java @@ -9,16 +9,59 @@ * The operation stage. */ public enum OperationStage { + /** + * Unknown. + */ STAGE_UNKNOWN((byte)0x0), + + /** + * Flush. + */ STAGE_FLUSH_RUN((byte)0x1), + + /** + * Flush writing Level 0. + */ STAGE_FLUSH_WRITE_L0((byte)0x2), + + /** + * Preparing compaction. + */ STAGE_COMPACTION_PREPARE((byte)0x3), + + /** + * Compaction. + */ STAGE_COMPACTION_RUN((byte)0x4), + + /** + * Compaction processing a key-value. + */ STAGE_COMPACTION_PROCESS_KV((byte)0x5), + + /** + * Installing compaction. + */ STAGE_COMPACTION_INSTALL((byte)0x6), + + /** + * Compaction syncing a file. + */ STAGE_COMPACTION_SYNC_FILE((byte)0x7), + + /** + * Picking Memtable(s) to flush. + */ STAGE_PICK_MEMTABLES_TO_FLUSH((byte)0x8), + + /** + * Rolling back Memtable(s). + */ STAGE_MEMTABLE_ROLLBACK((byte)0x9), + + /** + * Installing Memtable flush results. 
+ */ STAGE_MEMTABLE_INSTALL_FLUSH_RESULTS((byte)0xA); private final byte value; diff --git a/java/src/main/java/org/rocksdb/OperationType.java b/java/src/main/java/org/rocksdb/OperationType.java index bf73534683cc..0279e9e3b2f4 100644 --- a/java/src/main/java/org/rocksdb/OperationType.java +++ b/java/src/main/java/org/rocksdb/OperationType.java @@ -12,9 +12,24 @@ * examples include compaction and flush. */ public enum OperationType { + /** + * Unknown. + */ OP_UNKNOWN((byte)0x0), + + /** + * Compaction. + */ OP_COMPACTION((byte)0x1), + + /** + * Flush. + */ OP_FLUSH((byte) 0x2), + + /** + * DB Open. + */ OP_DBOPEN((byte) 0x3); private final byte value; diff --git a/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java b/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java index 4674eae010ef..42104bfcbe2b 100644 --- a/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java +++ b/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java @@ -229,9 +229,9 @@ protected final void disposeInternal(final long handle) { } private static native void disposeInternalJni(final long handle); - protected static native long open(final long optionsHandle, + private static native long open(final long optionsHandle, final String path) throws RocksDBException; - protected static native long[] open(final long handle, final String path, + private static native long[] open(final long handle, final String path, final byte[][] columnFamilyNames, final long[] columnFamilyOptions); private static native void closeDatabase(final long handle) throws RocksDBException; private static native long beginTransaction(final long handle, final long writeOptionsHandle); diff --git a/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java b/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java index f4111c7b1875..5f1f1842cac5 100644 --- a/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java +++ 
b/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java @@ -5,9 +5,15 @@ package org.rocksdb; +/** + * Options for an Optimistic Transaction. + */ public class OptimisticTransactionOptions extends RocksObject implements TransactionalOptions { + /** + * Constructs an OptimisticTransactionOptions. + */ public OptimisticTransactionOptions() { super(newOptimisticTransactionOptions()); } diff --git a/java/src/main/java/org/rocksdb/OptionString.java b/java/src/main/java/org/rocksdb/OptionString.java index d9e7e2689108..645afb1dd4b1 100644 --- a/java/src/main/java/org/rocksdb/OptionString.java +++ b/java/src/main/java/org/rocksdb/OptionString.java @@ -9,6 +9,9 @@ import java.util.List; import java.util.Objects; +/** + * An option expressed as a String. + */ @SuppressWarnings("PMD.AvoidStringBufferField") public class OptionString { private static final char kvPairSeparator = ';'; @@ -21,23 +24,51 @@ public class OptionString { private static final char escapeChar = '\\'; + /** + * The value of the option. + */ static class Value { final List list; final List complex; + /** + * Constructs a Value. + * + * @param list the list of values. + * @param complex the list of complex values. + */ public Value(final List list, final List complex) { this.list = list; this.complex = complex; } + /** + * Returns true if the value is a list. + * + * @return true if the value is a list, false otherwise. + */ public boolean isList() { return (this.list != null && this.complex == null); } + /** + * Constructs a value from a list. + * + * @param list a list of string values. + * + * @return the value. + */ public static Value fromList(final List list) { return new Value(list, null); } + /** + * Constructs a value from a complex value. + * + * @param complex the complex value. + * + * @return the value. 
+ */ public static Value fromComplex(final List complex) { return new Value(null, complex); } @@ -256,6 +287,13 @@ private List parseComplex() { return entries; } + /** + * Parse a string into a list of entries. + * + * @param str the string. + * + * @return the list of entries. + */ public static List parse(final String str) { Objects.requireNonNull(str); diff --git a/java/src/main/java/org/rocksdb/OptionsUtil.java b/java/src/main/java/org/rocksdb/OptionsUtil.java index 642599205d62..822ecb56dfa7 100644 --- a/java/src/main/java/org/rocksdb/OptionsUtil.java +++ b/java/src/main/java/org/rocksdb/OptionsUtil.java @@ -7,6 +7,9 @@ import java.util.List; +/** + * Utility functions to assist in working with Options. + */ public class OptionsUtil { /** * A static method to construct the DBOptions and ColumnFamilyDescriptors by diff --git a/java/src/main/java/org/rocksdb/PerfContext.java b/java/src/main/java/org/rocksdb/PerfContext.java index 2d9ac3203098..a9f6402bb76b 100644 --- a/java/src/main/java/org/rocksdb/PerfContext.java +++ b/java/src/main/java/org/rocksdb/PerfContext.java @@ -5,11 +5,23 @@ package org.rocksdb; +/** + * Performance Context. + */ public class PerfContext extends RocksObject { + + /** + * Constructs a PerfContext. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ PerfContext. + */ protected PerfContext(final long nativeHandle) { super(nativeHandle); } + /** + * Reset the performance context. + */ public void reset() { reset(nativeHandle_); } @@ -42,8 +54,8 @@ public long getBlockReadByte() { return getBlockReadByte(nativeHandle_); } - /* - @return total nanos spent on block reads + /** + * @return total nanos spent on block reads */ public long getBlockReadTime() { return getBlockReadTime(nativeHandle_); @@ -220,7 +232,7 @@ public long getBlobDecompressTime() { } /** - * total number of internal keys skipped over during iteration. 
+ * Get the total number of internal keys skipped over during iteration. * There are several reasons for it: * 1. when calling Next(), the iterator is in the position of the previous * key, so that we'll need to skip it. It means this counter will always @@ -236,51 +248,64 @@ public long getBlobDecompressTime() { * hidden by the tombstones will be included here. * 4. symmetric cases for Prev() and SeekToLast() * internal_recent_skipped_count is not included in this counter. + * + * @return the total number of internal keys skipped over during iteration */ public long getInternalKeySkippedCount() { return getInternalKeySkippedCount(nativeHandle_); } /** - * Total number of deletes and single deletes skipped over during iteration + * Get the Total number of deletes and single deletes skipped over during iteration * When calling Next(), Seek() or SeekToFirst(), after previous position * before calling Next(), the seek key in Seek() or the beginning for * SeekToFirst(), there may be one or more deleted keys before the next valid * key. Every deleted key is counted once. We don't recount here if there are * still older updates invalidated by the tombstones. + * + * @return total number of deletes and single deletes skipped over during iteration. */ public long getInternalDeleteSkippedCount() { return getInternalDeleteSkippedCount(nativeHandle_); } /** - * How many times iterators skipped over internal keys that are more recent + * Get how many times iterators skipped over internal keys that are more recent * than the snapshot that iterator is using. + * + * @return the number of times iterators skipped over internal keys that are more recent + * than the snapshot that iterator is using. */ public long getInternalRecentSkippedCount() { return getInternalRecentSkippedCount(nativeHandle_); } /** - * How many merge operands were fed into the merge operator by iterators. + * Get how many merge operands were fed into the merge operator by iterators. 
 * Note: base values are not included in the count. + * + * @return the number of merge operands that were fed into the merge operator by iterators. + */ public long getInternalMergeCount() { return getInternalMergeCount(nativeHandle_); } /** - * How many merge operands were fed into the merge operator by point lookups. + * Get how many merge operands were fed into the merge operator by point lookups. * Note: base values are not included in the count. + * + * @return the number of merge operands that were fed into the merge operator by point lookups. + */ public long getInternalMergePointLookupCount() { return getInternalMergePointLookupCount(nativeHandle_); } /** - * Number of times we reseeked inside a merging iterator, specifically to skip + * Get the number of times we re-seek'd inside a merging iterator, specifically to skip * after or before a range of keys covered by a range deletion in a newer LSM * component. + * + * @return the number of times we re-seek'd inside a merging iterator. + */ public long getInternalRangeDelReseekCount() { return getInternalRangeDelReseekCount(nativeHandle_); @@ -485,26 +510,37 @@ public long getNewTableIteratorNanos() { } /** + * Get total time of mem table block seeks in nanoseconds. + * + * @return Time spent on seeking a key in data/index blocks */ public long getBlockSeekNanos() { return getBlockSeekNanos(nativeHandle_); } + /** - * @return Time spent on finding or creating a table reader + * Get total time spent on finding or creating a table reader. + * + * @return the time spent on finding or creating a table reader */ public long getFindTableNanos() { return getFindTableNanos(nativeHandle_); } /** + * Get total number of mem table bloom hits. + * + * @return total number of mem table bloom hits */ public long getBloomMemtableHitCount() { return getBloomMemtableHitCount(nativeHandle_); } - // total number of mem table bloom misses + /** + * Get total number of mem table bloom misses. 
+ * + * @return total number of mem table bloom misses. + */ public long getBloomMemtableMissCount() { return getBloomMemtableMissCount(nativeHandle_); } @@ -544,91 +580,209 @@ public long getEnvNewSequentialFileNanos() { return getEnvNewSequentialFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for creating new random access file(s) in the environment. + * + * @return the total time + */ public long getEnvNewRandomAccessFileNanos() { return getEnvNewRandomAccessFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for creating new writable file(s) in the environment. + * + * @return the total time + */ public long getEnvNewWritableFileNanos() { return getEnvNewWritableFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for reusing writable file(s) in the environment. + * + * @return the total time + */ public long getEnvReuseWritableFileNanos() { return getEnvReuseWritableFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for creating new random access read-write file(s) in the environment. + * + * @return the total time + */ public long getEnvNewRandomRwFileNanos() { return getEnvNewRandomRwFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for creating new directory(s) in the environment. + * + * @return the total time + */ public long getEnvNewDirectoryNanos() { return getEnvNewDirectoryNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for checking if a file exists in the environment. + * + * @return the total time + */ public long getEnvFileExistsNanos() { return getEnvFileExistsNanos(nativeHandle_); } + + /** + * Get the time taken in nanoseconds for getting children in the environment. + * + * @return the total time + */ public long getEnvGetChildrenNanos() { return getEnvGetChildrenNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for getting children's file attributes in the environment. 
+ * + * @return the total time + */ public long getEnvGetChildrenFileAttributesNanos() { return getEnvGetChildrenFileAttributesNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for deleting file(s) in the environment. + * + * @return the total time + */ public long getEnvDeleteFileNanos() { return getEnvDeleteFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for creating directory(s) in the environment. + * + * @return the total time + */ public long getEnvCreateDirNanos() { return getEnvCreateDirNanos(nativeHandle_); } + + /** + * Get the time taken in nanoseconds for creating directory(s) (only if not already existing) in the environment. + * + * @return the total time + */ public long getEnvCreateDirIfMissingNanos() { return getEnvCreateDirIfMissingNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for deleting directory(s) in the environment. + * + * @return the total time + */ public long getEnvDeleteDirNanos() { return getEnvDeleteDirNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for getting file size(s) in the environment. + * + * @return the total time + */ public long getEnvGetFileSizeNanos() { return getEnvGetFileSizeNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for getting file modification time(s) in the environment. + * + * @return the total time + */ public long getEnvGetFileModificationTimeNanos() { return getEnvGetFileModificationTimeNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for renaming file(s) in the environment. + * + * @return the total time + */ public long getEnvRenameFileNanos() { return getEnvRenameFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for linking file(s) in the environment. + * + * @return the total time + */ public long getEnvLinkFileNanos() { return getEnvLinkFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for locking file(s) in the environment. 
+ * + * @return the total time + */ public long getEnvLockFileNanos() { return getEnvLockFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for unlocking file(s) in the environment. + * + * @return the total time + */ public long getEnvUnlockFileNanos() { return getEnvUnlockFileNanos(nativeHandle_); } + /** + * Get the time taken in nanoseconds for creating loggers in the environment. + * + * @return the total time + */ public long getEnvNewLoggerNanos() { return getEnvNewLoggerNanos(nativeHandle_); } + /** + * Get the CPU time consumed in the environment. + * + * @return the total time + */ public long getGetCpuNanos() { return getGetCpuNanos(nativeHandle_); } + /** + * Get the CPU time consumed by calling 'next' on iterator(s) in the environment. + * + * @return the total time + */ public long getIterNextCpuNanos() { return getIterNextCpuNanos(nativeHandle_); } + + /** + * Get the CPU time consumed by calling 'prev' on iterator(s) in the environment. + * + * @return the total time + */ public long getIterPrevCpuNanos() { return getIterPrevCpuNanos(nativeHandle_); } + /** + * Get the CPU time consumed by calling 'seek' on iterator(s) in the environment. + * + * @return the total time + */ public long getIterSeekCpuNanos() { return getIterSeekCpuNanos(nativeHandle_); } @@ -647,6 +801,9 @@ public long getDecryptDataNanos() { return getDecryptDataNanos(nativeHandle_); } + /** + * @return the number of asynchronous seeks. + */ public long getNumberAsyncSeek() { return getNumberAsyncSeek(nativeHandle_); } diff --git a/java/src/main/java/org/rocksdb/PerfLevel.java b/java/src/main/java/org/rocksdb/PerfLevel.java index 332e6d7d977b..a0db8a3286c4 100644 --- a/java/src/main/java/org/rocksdb/PerfLevel.java +++ b/java/src/main/java/org/rocksdb/PerfLevel.java @@ -5,6 +5,9 @@ package org.rocksdb; +/** + * Performance monitoring levels. 
+ */ public enum PerfLevel { /** * Unknown setting @@ -45,16 +48,31 @@ public enum PerfLevel { private final byte _value; + /** + * Get the internal representation value. + * + * @return the internal representation value. + */ public byte getValue() { return _value; } + /** + * Get the PerfLevel from the internal representation value. + * + * @param level the internal representation value. + * + * @return the PerfLevel + * + * @throws IllegalArgumentException if the value does not match a + * PerfLevel + */ public static PerfLevel getPerfLevel(byte level) { for (PerfLevel l : PerfLevel.values()) { if (l.getValue() == level) { return l; } } - throw new IllegalArgumentException("Uknknown PerfLevel constant : " + level); + throw new IllegalArgumentException("Unknown PerfLevel constant : " + level); } } diff --git a/java/src/main/java/org/rocksdb/PersistentCache.java b/java/src/main/java/org/rocksdb/PersistentCache.java index 900e7d1393bc..e9e34343a6ab 100644 --- a/java/src/main/java/org/rocksdb/PersistentCache.java +++ b/java/src/main/java/org/rocksdb/PersistentCache.java @@ -11,6 +11,17 @@ */ public class PersistentCache extends RocksObject { + /** + * Constructs a persistent cache. + * + * @param env the environment. + * @param path the path for the cache. + * @param size the size of the cache. + * @param logger the logger to use. + * @param optimizedForNvm true to optimize for NVM, false otherwise. + * + * @throws RocksDBException if the cache cannot be created. + */ public PersistentCache(final Env env, final String path, final long size, final Logger logger, final boolean optimizedForNvm) throws RocksDBException { diff --git a/java/src/main/java/org/rocksdb/PlainTableConfig.java b/java/src/main/java/org/rocksdb/PlainTableConfig.java index 1331f5b0a2ac..01bd76c73736 100644 --- a/java/src/main/java/org/rocksdb/PlainTableConfig.java +++ b/java/src/main/java/org/rocksdb/PlainTableConfig.java @@ -13,17 +13,52 @@ *

It also support prefix hash feature.

 */ public class PlainTableConfig extends TableFormatConfig { + + /** + * Indicates that the key size can be variable length. + */ public static final int VARIABLE_LENGTH = 0; + + /** + * The default bits per key in the bloom filter. + */ public static final int DEFAULT_BLOOM_BITS_PER_KEY = 10; + + /** + * The default ratio of the hash table. + */ public static final double DEFAULT_HASH_TABLE_RATIO = 0.75; + + /** + * The default sparseness factor of the index. + */ public static final int DEFAULT_INDEX_SPARSENESS = 16; + + /** + * The default size of the huge TLB. + */ public static final int DEFAULT_HUGE_TLB_SIZE = 0; + + /** + * The default encoding type. + */ public static final EncodingType DEFAULT_ENCODING_TYPE = EncodingType.kPlain; + + /** + * The default full scan mode. + */ public static final boolean DEFAULT_FULL_SCAN_MODE = false; + + /** + * The default setting for whether to store the index in a file. + */ public static final boolean DEFAULT_STORE_INDEX_IN_FILE = false; + /** + * Constructs a PlainTableConfig with the default settings. + */ public PlainTableConfig() { keySize_ = VARIABLE_LENGTH; bloomBitsPerKey_ = DEFAULT_BLOOM_BITS_PER_KEY; diff --git a/java/src/main/java/org/rocksdb/PrepopulateBlobCache.java b/java/src/main/java/org/rocksdb/PrepopulateBlobCache.java index f1237aa7c95b..d2a02f6a9271 100644 --- a/java/src/main/java/org/rocksdb/PrepopulateBlobCache.java +++ b/java/src/main/java/org/rocksdb/PrepopulateBlobCache.java @@ -18,7 +18,15 @@ * system since it involves network traffic and higher latencies.

*/ public enum PrepopulateBlobCache { + + /** + * Disable pre-populating the blob cache + */ PREPOPULATE_BLOB_DISABLE((byte) 0x0, "prepopulate_blob_disable", "kDisable"), + + /** + * Only pre-populate on BLOB flush. + */ PREPOPULATE_BLOB_FLUSH_ONLY((byte) 0x1, "prepopulate_blob_flush_only", "kFlushOnly"); /** diff --git a/java/src/main/java/org/rocksdb/Priority.java b/java/src/main/java/org/rocksdb/Priority.java index 34a56edcbcde..ac656f9a350f 100644 --- a/java/src/main/java/org/rocksdb/Priority.java +++ b/java/src/main/java/org/rocksdb/Priority.java @@ -9,9 +9,25 @@ * The Thread Pool priority. */ public enum Priority { + + /** + * Bottom most priority. + */ BOTTOM((byte) 0x0), + + /** + * Low priority. + */ LOW((byte) 0x1), + + /** + * High priority. + */ HIGH((byte)0x2), + + /** + * maximum number of priority levels. + */ TOTAL((byte)0x3); private final byte value; diff --git a/java/src/main/java/org/rocksdb/Range.java b/java/src/main/java/org/rocksdb/Range.java index 74c85e5f04f3..16f4dbe2567e 100644 --- a/java/src/main/java/org/rocksdb/Range.java +++ b/java/src/main/java/org/rocksdb/Range.java @@ -12,6 +12,13 @@ public class Range { final Slice start; final Slice limit; + /** + * Constructs a Range. + * + * + * @param start the start of the range + * @param limit the end (start+limit) of the range + */ public Range(final Slice start, final Slice limit) { this.start = start; this.limit = limit; diff --git a/java/src/main/java/org/rocksdb/RateLimiter.java b/java/src/main/java/org/rocksdb/RateLimiter.java index 4fa5551b7876..caf73e749020 100644 --- a/java/src/main/java/org/rocksdb/RateLimiter.java +++ b/java/src/main/java/org/rocksdb/RateLimiter.java @@ -12,10 +12,26 @@ * @since 3.10.0 */ public class RateLimiter extends RocksObject { + + /** + * The default refill period in microseconds. + */ public static final long DEFAULT_REFILL_PERIOD_MICROS = 100 * 1000; + + /** + * The default fairness parameter value. 
+ */ public static final int DEFAULT_FAIRNESS = 10; + + /** + * The default rate limiter mode. + */ public static final RateLimiterMode DEFAULT_MODE = RateLimiterMode.WRITES_ONLY; + + /** + * The default of whether to enable auto-tune. + */ public static final boolean DEFAULT_AUTOTUNE = false; /** diff --git a/java/src/main/java/org/rocksdb/RateLimiterMode.java b/java/src/main/java/org/rocksdb/RateLimiterMode.java index 4b029d8165e2..d0bdc3882b1d 100644 --- a/java/src/main/java/org/rocksdb/RateLimiterMode.java +++ b/java/src/main/java/org/rocksdb/RateLimiterMode.java @@ -9,8 +9,20 @@ * Mode for {@link RateLimiter#RateLimiter(long, long, int, RateLimiterMode)}. */ public enum RateLimiterMode { + + /** + * Only rate limit reads. + */ READS_ONLY((byte)0x0), + + /** + * Only rate limit writes. + */ WRITES_ONLY((byte)0x1), + + /** + * Rate limit all IO. + */ ALL_IO((byte)0x2); private final byte value; diff --git a/java/src/main/java/org/rocksdb/ReadOptions.java b/java/src/main/java/org/rocksdb/ReadOptions.java index 8cc9883d23cd..26503cb22544 100644 --- a/java/src/main/java/org/rocksdb/ReadOptions.java +++ b/java/src/main/java/org/rocksdb/ReadOptions.java @@ -12,11 +12,17 @@ * become out-of-scope to release the allocated memory in c++. */ public class ReadOptions extends RocksObject { + + /** + * Constructs a ReadOptions. + */ public ReadOptions() { super(newReadOptions()); } /** + * Constructs a ReadOptions. + * * @param verifyChecksums verification will be performed on every read * when set to true * @param fillCache if true, then fill-cache behavior will be performed. diff --git a/java/src/main/java/org/rocksdb/ReadTier.java b/java/src/main/java/org/rocksdb/ReadTier.java index 78f83f6ad657..b200823544ca 100644 --- a/java/src/main/java/org/rocksdb/ReadTier.java +++ b/java/src/main/java/org/rocksdb/ReadTier.java @@ -9,9 +9,25 @@ * RocksDB {@link ReadOptions} read tiers. */ public enum ReadTier { + + /** + * Read all tiers. 
+ */ READ_ALL_TIER((byte)0), + + /** + * Read block cache. + */ BLOCK_CACHE_TIER((byte)1), + + /** + * Read persisted. + */ PERSISTED_TIER((byte)2), + + /** + * Read Memtable(s). + */ MEMTABLE_TIER((byte)3); private final byte value; diff --git a/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java b/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java index e96694313b4a..935828d0e1dd 100644 --- a/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java +++ b/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java @@ -6,10 +6,14 @@ package org.rocksdb; /** - * Just a Java wrapper around EmptyValueCompactionFilter implemented in C++ + * Just a Java wrapper around EmptyValueCompactionFilter implemented in C++. */ public class RemoveEmptyValueCompactionFilter extends AbstractCompactionFilter { + + /** + * Constructs a RemoveEmptyValueCompactionFilter. + */ public RemoveEmptyValueCompactionFilter() { super(createNewRemoveEmptyValueCompactionFilter0()); } diff --git a/java/src/main/java/org/rocksdb/RocksCallbackObject.java b/java/src/main/java/org/rocksdb/RocksCallbackObject.java index 8fd11d3e5b0c..345eeb2b7fc2 100644 --- a/java/src/main/java/org/rocksdb/RocksCallbackObject.java +++ b/java/src/main/java/org/rocksdb/RocksCallbackObject.java @@ -30,6 +30,11 @@ public abstract class RocksCallbackObject extends */ protected final long nativeHandle_; + /** + * Constructs a RocksCallbackObject. + * + * @param nativeParameterHandles reference to the value of the C++ pointers pointing to the underlying native RocksDB C++ objects. + */ protected RocksCallbackObject(final long... 
nativeParameterHandles) { super(true); this.nativeHandle_ = initializeNative(nativeParameterHandles); diff --git a/java/src/main/java/org/rocksdb/RocksDB.java b/java/src/main/java/org/rocksdb/RocksDB.java index 865fb2bb097d..da17cf02241a 100644 --- a/java/src/main/java/org/rocksdb/RocksDB.java +++ b/java/src/main/java/org/rocksdb/RocksDB.java @@ -21,7 +21,15 @@ * indicates sth wrong at the RocksDB library side and the call failed. */ public class RocksDB extends RocksObject { + + /** + * The name of the default column family. + */ public static final byte[] DEFAULT_COLUMN_FAMILY = "default".getBytes(UTF_8); + + /** + * A constant representing a result where something was searched for but not found. + */ public static final int NOT_FOUND = -1; private enum LibraryState { @@ -165,6 +173,11 @@ private static void waitForLibraryToBeLoaded() { } } + /** + * Get the RocksDB version. + * + * @return the version of RocksDB. + */ public static Version rocksdbVersion() { return version; } @@ -808,6 +821,9 @@ public List createColumnFamilies( * The ColumnFamilyHandle is automatically disposed with DB disposal. * * @param columnFamilyDescriptor column family to be created. + * @param importColumnFamilyOptions the options for the import. + * @param metadata the metadata for the imported file. + * * @return {@link org.rocksdb.ColumnFamilyHandle} instance. * * @throws RocksDBException thrown if error happens in underlying @@ -823,6 +839,21 @@ public ColumnFamilyHandle createColumnFamilyWithImport( columnFamilyDescriptor, importColumnFamilyOptions, metadatas); } + /** + * Creates a new column family with the name columnFamilyName and + * import external SST files specified in `metadata` allocates a + * ColumnFamilyHandle within an internal structure. + * The ColumnFamilyHandle is automatically disposed with DB disposal. + * + * @param columnFamilyDescriptor column family to be created. + * @param importColumnFamilyOptions the options for the import. 
+ * @param metadatas the metadata for the imported files. + * + * @return {@link org.rocksdb.ColumnFamilyHandle} instance. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ public ColumnFamilyHandle createColumnFamilyWithImport( final ColumnFamilyDescriptor columnFamilyDescriptor, final ImportColumnFamilyOptions importColumnFamilyOptions, @@ -857,10 +888,17 @@ public void dropColumnFamily(final ColumnFamilyHandle columnFamilyHandle) dropColumnFamily(nativeHandle_, columnFamilyHandle.nativeHandle_); } - // Bulk drop column families. This call only records drop records in the - // manifest and prevents the column families from flushing and compacting. - // In case of error, the request may succeed partially. User may call - // ListColumnFamilies to check the result. + /** + * Bulk drop column families. This call only records drop records in the + * manifest and prevents the column families from flushing and compacting. + * In case of error, the request may succeed partially. User may call + * {@link #listColumnFamilies(Options, String)} to check the result. + * + * @param columnFamilies the column families to drop. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ public void dropColumnFamilies( final List columnFamilies) throws RocksDBException { final long[] cfHandles = new long[columnFamilies.size()]; @@ -1716,6 +1754,19 @@ public void merge(final WriteOptions writeOpts, key, offset, len, value, vOffset, vLen); } + /** + * Add merge operand for key/value pair. + * + * @param writeOpts {@link WriteOptions} for this write. + * @param key the specified key to be merged. + * @param value the value to be merged with the current value for + * the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. 
+ * + * @throws IndexOutOfBoundsException if an offset or length is out of bounds + */ public void merge(final WriteOptions writeOpts, final ByteBuffer key, final ByteBuffer value) throws RocksDBException { if (key.isDirect() && value.isDirect()) { @@ -1734,6 +1785,20 @@ public void merge(final WriteOptions writeOpts, final ByteBuffer key, final Byte value.position(value.limit()); } + /** + * Add merge operand for key/value pair. + * + * @param columnFamilyHandle the column family. + * @param writeOpts {@link WriteOptions} for this write. + * @param key the specified key to be merged. + * @param value the value to be merged with the current value for + * the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * + * @throws IndexOutOfBoundsException if an offset or length is out of bounds + */ public void merge(final ColumnFamilyHandle columnFamilyHandle, final WriteOptions writeOpts, final ByteBuffer key, final ByteBuffer value) throws RocksDBException { if (key.isDirect() && value.isDirect()) { @@ -1944,6 +2009,7 @@ public int get(final byte[] key, final int offset, final int len, * * @throws RocksDBException thrown if error happens in underlying * native library. + * @throws IllegalArgumentException if the arguments are invalid. */ public int get(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, final byte[] value) throws RocksDBException, IllegalArgumentException { @@ -1976,6 +2042,7 @@ public int get(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, * * @throws RocksDBException thrown if error happens in underlying * native library. + * @throws IllegalArgumentException if the arguments are invalid. */ public int get(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, final int offset, final int len, final byte[] value, final int vOffset, @@ -2584,11 +2651,11 @@ public List multiGetByteBuffers(final ReadOptions readOptio * Check if a key exists in the database. 
* This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * @param key byte array of a key to search for* * @return true if key exist in database, otherwise false. @@ -2600,11 +2667,11 @@ public boolean keyExists(final byte[] key) { * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * @param key byte array of a key to search for * @param offset the offset of the "key" array to be used, must be @@ -2620,11 +2687,11 @@ public boolean keyExists(final byte[] key, final int offset, final int len) { * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param columnFamilyHandle {@link ColumnFamilyHandle} instance @@ -2639,11 +2706,11 @@ public boolean keyExists(final ColumnFamilyHandle columnFamilyHandle, final byte * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param columnFamilyHandle {@link ColumnFamilyHandle} instance @@ -2663,11 +2730,11 @@ public boolean keyExists(final ColumnFamilyHandle columnFamilyHandle, final byte * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param readOptions {@link ReadOptions} instance @@ -2682,11 +2749,11 @@ public boolean keyExists(final ReadOptions readOptions, final byte[] key) { * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param readOptions {@link ReadOptions} instance @@ -2706,11 +2773,11 @@ public boolean keyExists( * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param columnFamilyHandle {@link ColumnFamilyHandle} instance @@ -2727,11 +2794,11 @@ public boolean keyExists(final ColumnFamilyHandle columnFamilyHandle, * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param columnFamilyHandle {@link ColumnFamilyHandle} instance @@ -2755,11 +2822,11 @@ public boolean keyExists(final ColumnFamilyHandle columnFamilyHandle, * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param key ByteBuffer with key. Must be allocated as direct. @@ -2773,11 +2840,11 @@ public boolean keyExists(final ByteBuffer key) { * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param columnFamilyHandle {@link ColumnFamilyHandle} instance @@ -2792,11 +2859,11 @@ public boolean keyExists(final ColumnFamilyHandle columnFamilyHandle, final Byte * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param readOptions {@link ReadOptions} instance @@ -2811,11 +2878,11 @@ public boolean keyExists(final ReadOptions readOptions, final ByteBuffer key) { * Check if a key exists in the database. * This method is not as lightweight as {@code keyMayExist} but it gives a 100% guarantee * of a correct result, whether the key exists or not. - * + *

* Internally it checks if the key may exist and then double checks with read operation * that confirms the key exists. This deals with the case where {@code keyMayExist} may return * a false positive. - * + *

* The code crosses the Java/JNI boundary only once. * * @param columnFamilyHandle {@link ColumnFamilyHandle} instance @@ -3675,10 +3742,26 @@ public long[] getApproximateSizes(final List ranges, return getApproximateSizes(null, ranges, sizeApproximationFlags); } + /** + * Count and size. + */ public static class CountAndSize { + /** + * The count. + */ public final long count; + + /** + * The size. + */ public final long size; + /** + * Constructs a CountAndSize. + * + * @param count the count. + * @param size the size. + */ public CountAndSize(final long count, final long size) { this.count = count; this.size = size; @@ -3878,7 +3961,9 @@ public void setOptions( /** * Set performance level for rocksdb performance measurement. - * @param level + * + * @param level the performance level + * * @throws IllegalArgumentException for UNINITIALIZED and OUT_OF_BOUNDS values * as they can't be used for settings. */ @@ -3894,7 +3979,8 @@ public void setPerfLevel(final PerfLevel level) { /** * Return current performance level measurement settings. - * @return + * + * @return the performance level */ public PerfLevel getPerfLevel() { byte level = getPerfLevelNative(); @@ -3902,8 +3988,9 @@ public PerfLevel getPerfLevel() { } /** - * Return perf context bound to this thread. - * @return + * Return performance context bound to this thread. + * + * @return the performance context */ public PerfContext getPerfContext() { long native_handle = getPerfContextNative(); @@ -3911,7 +3998,7 @@ public PerfContext getPerfContext() { } /** - * Get the options for the column family handle + * Get the options for the column family handle. * * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} * instance, or null for the default column family. @@ -4326,6 +4413,9 @@ public void enableFileDeletions() throws RocksDBException { enableFileDeletions(nativeHandle_); } + /** + * Live files. + */ public static class LiveFiles { /** * The valid size of the manifest file. 
The manifest file is an ever growing @@ -4788,15 +4878,29 @@ private static long[] toRangeSliceHandles(final List ranges) { return rangeSliceHandles; } + /** + * Store the options instance. + * + * This is used to ensure it is correctly released later. + * + * @param options the options. + */ protected void storeOptionsInstance(final DBOptionsInterface options) { options_ = options; } - protected void storeDefaultColumnFamilyHandle(ColumnFamilyHandle columnFamilyHandle) { + /** + * Store the default column family handle. + * + * This is used to ensure it is correctly released later. + * + * @param columnFamilyHandle the handle of the default column family. + */ + protected void storeDefaultColumnFamilyHandle(final ColumnFamilyHandle columnFamilyHandle) { defaultColumnFamilyHandle_ = columnFamilyHandle; } - private static void checkBounds(int offset, int len, int size) { + private static void checkBounds(final int offset, final int len, final int size) { if ((offset | len | (offset + len) | (size - (offset + len))) < 0) { throw new IndexOutOfBoundsException(String.format("offset(%d), len(%d), size(%d)", offset, len, size)); } @@ -5085,28 +5189,56 @@ private static native void destroyDB(final String path, final long optionsHandle private static native int version(); + /** + * The DB Options. + */ protected DBOptionsInterface options_; private static Version version; + /** + * Representation of a 3 part version number, e.g. MAJOR.MINOR.PATCH. + */ public static class Version { private final byte major; private final byte minor; private final byte patch; + /** + * Constructs a new Version. + * + * @param major the major component of the version number. + * @param minor the minor component of the version number. + * @param patch the patch component of the version number. + */ public Version(final byte major, final byte minor, final byte patch) { this.major = major; this.minor = minor; this.patch = patch; } + /** + * Get the major component of the version number. 
+ * + * @return the major component of the version number. + */ public int getMajor() { return major; } + /** + * Get the minor component of the version number. + * + * @return the minor component of the version number. + */ public int getMinor() { return minor; } + /** + * Get the patch component of the version number. + * + * @return the patch component of the version number. + */ public int getPatch() { return patch; } diff --git a/java/src/main/java/org/rocksdb/RocksDBException.java b/java/src/main/java/org/rocksdb/RocksDBException.java index 9df411d121cc..8a74c2f515b7 100644 --- a/java/src/main/java/org/rocksdb/RocksDBException.java +++ b/java/src/main/java/org/rocksdb/RocksDBException.java @@ -11,22 +11,38 @@ */ public class RocksDBException extends Exception { private static final long serialVersionUID = -5187634878466267120L; + + /** + * The error status that led to this exception. + */ /* @Nullable */ private final Status status; /** * The private construct used by a set of public static factory method. * - * @param msg the specified error message. + * @param message the specified error message. */ - public RocksDBException(final String msg) { - this(msg, null); + public RocksDBException(final String message) { + this(message, null); } - public RocksDBException(final String msg, final Status status) { - super(msg); + /** + * Constructs a RocksDBException. + * + * @param message the detail message. The detail message is saved for later retrieval by the + * {@link #getMessage()} method. + * @param status the error status that led to this exception. + */ + public RocksDBException(final String message, final Status status) { + super(message); this.status = status; } + /** + * Constructs a RocksDBException. + * + * @param status the error status that led to this exception. + */ public RocksDBException(final Status status) { super(status.getState() != null ? 
status.getState() : status.getCodeString()); diff --git a/java/src/main/java/org/rocksdb/RocksIterator.java b/java/src/main/java/org/rocksdb/RocksIterator.java index 8e331d51845c..87713d70590b 100644 --- a/java/src/main/java/org/rocksdb/RocksIterator.java +++ b/java/src/main/java/org/rocksdb/RocksIterator.java @@ -23,8 +23,15 @@ * @see org.rocksdb.RocksObject */ public class RocksIterator extends AbstractRocksIterator { - protected RocksIterator(final RocksDB rocksDB, final long nativeHandle) { - super(rocksDB, nativeHandle); + + /** + * Constructs a RocksIterator. + * + * @param rocksDb the database. + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ RocksIterator. + */ + protected RocksIterator(final RocksDB rocksDb, final long nativeHandle) { + super(rocksDb, nativeHandle); } /** diff --git a/java/src/main/java/org/rocksdb/RocksMutableObject.java b/java/src/main/java/org/rocksdb/RocksMutableObject.java index eb3215290f84..6312634a4d52 100644 --- a/java/src/main/java/org/rocksdb/RocksMutableObject.java +++ b/java/src/main/java/org/rocksdb/RocksMutableObject.java @@ -22,9 +22,17 @@ public abstract class RocksMutableObject extends AbstractNativeReference { private long nativeHandle_; private boolean owningHandle_; + /** + * Constructs a RocksMutableObject with no initial underlying native C++ object. + */ protected RocksMutableObject() { } + /** + * Constructs a RocksMutableObject. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ object. + */ protected RocksMutableObject(final long nativeHandle) { this.nativeHandle_ = nativeHandle; this.owningHandle_ = true; @@ -79,9 +87,19 @@ public final synchronized void close() { } } + /** + * Deletes underlying C++ object pointer. + */ protected void disposeInternal() { disposeInternal(nativeHandle_); } + /** + * Deletes any underlying native C++ objects which are owned by this object. 
+ * All subclasses of {@code RocksMutableObject} must + * implement this to release their underlying native C++ objects. + * + * @param handle reference to the value of the C++ pointer pointing to some underlying native RocksDB C++ object. + */ protected abstract void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/RocksObject.java b/java/src/main/java/org/rocksdb/RocksObject.java index cd5de70acbe2..a7657224cfd4 100644 --- a/java/src/main/java/org/rocksdb/RocksObject.java +++ b/java/src/main/java/org/rocksdb/RocksObject.java @@ -24,6 +24,12 @@ public abstract class RocksObject extends AbstractImmutableNativeReference { */ protected final long nativeHandle_; + /** + * Constructor to be called by subclasses to set the + * handle to the underlying C++ object. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ object. + */ protected RocksObject(final long nativeHandle) { super(true); this.nativeHandle_ = nativeHandle; @@ -37,9 +43,12 @@ protected void disposeInternal() { disposeInternal(nativeHandle_); } + /** + * Deletes any underlying native C++ objects which are owned by this object. + * All subclasses of {@code RocksObject} must + * implement this to release their underlying native C++ objects. + * + * @param handle reference to the value of the C++ pointer pointing to some underlying native RocksDB C++ object. + */ protected abstract void disposeInternal(final long handle); - -// long getNativeHandle() { -// return nativeHandle_; -// } } diff --git a/java/src/main/java/org/rocksdb/SanityLevel.java b/java/src/main/java/org/rocksdb/SanityLevel.java index 30568c363377..e24671c287b6 100644 --- a/java/src/main/java/org/rocksdb/SanityLevel.java +++ b/java/src/main/java/org/rocksdb/SanityLevel.java @@ -6,9 +6,24 @@ package org.rocksdb; +/** + * The Sanity Level. + */ public enum SanityLevel { + + /** + * None. + */ NONE((byte) 0x0), + + /** + * Loosely compatible. 
+ */ LOOSELY_COMPATIBLE((byte) 0x1), + + /** + * Exactly matches. + */ EXACT_MATCH((byte) 0xFF); private final byte value; diff --git a/java/src/main/java/org/rocksdb/SizeApproximationFlag.java b/java/src/main/java/org/rocksdb/SizeApproximationFlag.java index fe3c2dd05be8..3e2759a10bd5 100644 --- a/java/src/main/java/org/rocksdb/SizeApproximationFlag.java +++ b/java/src/main/java/org/rocksdb/SizeApproximationFlag.java @@ -10,8 +10,20 @@ * or file stats approximation or both. */ public enum SizeApproximationFlag { + + /** + * None + */ NONE((byte)0x0), + + /** + * Include Memtable(s). + */ INCLUDE_MEMTABLES((byte)0x1), + + /** + * Include file(s). + */ INCLUDE_FILES((byte)0x2); private final byte value; diff --git a/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java b/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java index b9d143929092..558a2a6f50b8 100644 --- a/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java +++ b/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java @@ -6,6 +6,9 @@ */ public class SkipListMemTableConfig extends MemTableConfig { + /** + * The default lookahead. + */ public static final long DEFAULT_LOOKAHEAD = 0; /** diff --git a/java/src/main/java/org/rocksdb/SstFileManager.java b/java/src/main/java/org/rocksdb/SstFileManager.java index 465cf1cfb731..e201e2603044 100644 --- a/java/src/main/java/org/rocksdb/SstFileManager.java +++ b/java/src/main/java/org/rocksdb/SstFileManager.java @@ -17,9 +17,25 @@ */ //@ThreadSafe public final class SstFileManager extends RocksObject { + + /** + * The default bytes-per-sec rate. + */ public static final long DEFAULT_RATE_BYTES_PER_SEC = 0; + + /** + * The default of whether to delete existing trash. + */ public static final boolean DEFAULT_DELETE_EXISTING_TRASH = true; - public static final double DEFAULT_MAX_TRASH_DB_RATION = 0.25; + + /** + * The default max trash db ratio. 
+ */ + public static final double DEFAULT_MAX_TRASH_DB_RATIO = 0.25; + + /** + * The default max delete chunk size in bytes. + */ public static final long DEFAULT_BYTES_MAX_DELETE_CHUNK = 64 * 1024 * 1024; /** @@ -65,7 +81,7 @@ public SstFileManager(final Env env, /*@Nullable*/ final Logger logger) */ public SstFileManager(final Env env, /*@Nullable*/ final Logger logger, final long rateBytesPerSec) throws RocksDBException { - this(env, logger, rateBytesPerSec, DEFAULT_MAX_TRASH_DB_RATION); + this(env, logger, rateBytesPerSec, DEFAULT_MAX_TRASH_DB_RATIO); } /** diff --git a/java/src/main/java/org/rocksdb/SstFileReader.java b/java/src/main/java/org/rocksdb/SstFileReader.java index 46bebf1dd2e3..e7e81bcc2da0 100644 --- a/java/src/main/java/org/rocksdb/SstFileReader.java +++ b/java/src/main/java/org/rocksdb/SstFileReader.java @@ -5,7 +5,16 @@ package org.rocksdb; +/** + * An SST File Reader. + */ public class SstFileReader extends RocksObject { + + /** + * Constructs an SstFileReader. + * + * @param options the options for the reader. + */ public SstFileReader(final Options options) { super(newSstFileReader(options.nativeHandle_)); } diff --git a/java/src/main/java/org/rocksdb/SstFileReaderIterator.java b/java/src/main/java/org/rocksdb/SstFileReaderIterator.java index 31f2f393aaf3..71b2d46fb9e4 100644 --- a/java/src/main/java/org/rocksdb/SstFileReaderIterator.java +++ b/java/src/main/java/org/rocksdb/SstFileReaderIterator.java @@ -21,6 +21,13 @@ * @see RocksObject */ public class SstFileReaderIterator extends AbstractRocksIterator { + + /** + * Constructs a SstFileReaderIterator. + * + * @param reader the SST file reader. + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ SstFileReaderIterator. 
+ */ protected SstFileReaderIterator(final SstFileReader reader, final long nativeHandle) { super(reader, nativeHandle); } diff --git a/java/src/main/java/org/rocksdb/SstPartitionerFactory.java b/java/src/main/java/org/rocksdb/SstPartitionerFactory.java index ea6f13565995..a87cfd75ffa4 100644 --- a/java/src/main/java/org/rocksdb/SstPartitionerFactory.java +++ b/java/src/main/java/org/rocksdb/SstPartitionerFactory.java @@ -9,6 +9,11 @@ * Handle to factory for SstPartitioner. It is used in {@link ColumnFamilyOptions} */ public abstract class SstPartitionerFactory extends RocksObject { + /** + * Constructs a SstPartitionerFactory. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ SstPartitionerFactory. + */ protected SstPartitionerFactory(final long nativeHandle) { super(nativeHandle); } diff --git a/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java b/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java index d9b7184aa012..128243db0161 100644 --- a/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java +++ b/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java @@ -9,6 +9,12 @@ * Fixed prefix factory. It partitions SST files using fixed prefix of the key. */ public class SstPartitionerFixedPrefixFactory extends SstPartitionerFactory { + + /** + * Constructs an SstPartitionerFixedPrefixFactory. + * + * @param prefixLength the prefix length of the keys for partitioning. + */ public SstPartitionerFixedPrefixFactory(final long prefixLength) { super(newSstPartitionerFixedPrefixFactory0(prefixLength)); } diff --git a/java/src/main/java/org/rocksdb/StateType.java b/java/src/main/java/org/rocksdb/StateType.java index 803fa37d91ec..8151b098cf3e 100644 --- a/java/src/main/java/org/rocksdb/StateType.java +++ b/java/src/main/java/org/rocksdb/StateType.java @@ -12,7 +12,15 @@ * such as reading / writing a file or waiting for a mutex. 
*/ public enum StateType { + + /** + * Unknown. + */ STATE_UNKNOWN((byte)0x0), + + /** + * Waiting on Mutex. + */ STATE_MUTEX_WAIT((byte)0x1); private final byte value; diff --git a/java/src/main/java/org/rocksdb/Statistics.java b/java/src/main/java/org/rocksdb/Statistics.java index 80ae24586930..f9c1f20d645c 100644 --- a/java/src/main/java/org/rocksdb/Statistics.java +++ b/java/src/main/java/org/rocksdb/Statistics.java @@ -13,18 +13,37 @@ */ public class Statistics extends RocksObject { + /** + * Constructs a Statistics. + */ public Statistics() { super(newStatisticsInstance()); } + /** + * Constructs a Statistics. + * + * @param otherStatistics another statistics object to copy stats from. + */ public Statistics(final Statistics otherStatistics) { super(newStatistics(otherStatistics.nativeHandle_)); } + /** + * Constructs a Statistics. + * + * @param ignoreHistograms histograms to ignore. + */ public Statistics(final EnumSet ignoreHistograms) { super(newStatisticsInstance(toArrayValues(ignoreHistograms))); } + /** + * Constructs a Statistics. + * + * @param ignoreHistograms histograms to ignore. + * @param otherStatistics another statistics object to copy stats from. + */ public Statistics(final EnumSet ignoreHistograms, final Statistics otherStatistics) { super(newStatistics(toArrayValues(ignoreHistograms), otherStatistics.nativeHandle_)); } diff --git a/java/src/main/java/org/rocksdb/StatisticsCollector.java b/java/src/main/java/org/rocksdb/StatisticsCollector.java index dd0d98fe5214..e034bbfdc405 100644 --- a/java/src/main/java/org/rocksdb/StatisticsCollector.java +++ b/java/src/main/java/org/rocksdb/StatisticsCollector.java @@ -41,6 +41,9 @@ public StatisticsCollector( _executorService = Executors.newSingleThreadExecutor(); } + /** + * Start collecting statistics. 
+ */ public void start() { _executorService.submit(collectStatistics()); } diff --git a/java/src/main/java/org/rocksdb/StatsCollectorInput.java b/java/src/main/java/org/rocksdb/StatsCollectorInput.java index 5bf43ade5a6f..f36f7baa3f7f 100644 --- a/java/src/main/java/org/rocksdb/StatsCollectorInput.java +++ b/java/src/main/java/org/rocksdb/StatsCollectorInput.java @@ -25,11 +25,21 @@ public StatsCollectorInput(final Statistics statistics, _statsCallback = statsCallback; } + /** + * Get the statistics. + * + * @return the statistics. + */ public Statistics getStatistics() { return _statistics; } - public StatisticsCollectorCallback getCallback() { + /** + * Get the statistics collector callback. + * + * @return the statistics collector callback. + */ + StatisticsCollectorCallback getCallback() { return _statsCallback; } } diff --git a/java/src/main/java/org/rocksdb/Status.java b/java/src/main/java/org/rocksdb/Status.java index 5f751f422089..fa8e86bc603c 100644 --- a/java/src/main/java/org/rocksdb/Status.java +++ b/java/src/main/java/org/rocksdb/Status.java @@ -16,10 +16,29 @@ */ public class Status implements Serializable { private static final long serialVersionUID = -3794191127754280439L; + + /** + * The status code. + */ private final Code code; + + /** + * The status sub-code. + */ /* @Nullable */ private final SubCode subCode; + + /** + * The state of the status. + */ /* @Nullable */ private final String state; + /** + * Constructs a Status. + * + * @param code the code. + * @param subCode the sub-code. + * @param state the state. + */ public Status(final Code code, final SubCode subCode, final String state) { this.code = code; this.subCode = subCode; @@ -35,18 +54,38 @@ private Status(final byte code, final byte subCode, final String state) { this.state = state; } + /** + * Get the status code. + * + * @return the status code. + */ public Code getCode() { return code; } + /** + * Get the status sub-code. + * + * @return the status sub-code. 
+ */ public SubCode getSubCode() { return subCode; } + /** + * Get the state of the status. + * + * @return the status state. + */ public String getState() { return state; } + /** + * Get a string representation of the status code. + * + * @return a string representation of the status code. + */ public String getCodeString() { final StringBuilder builder = new StringBuilder() .append(code.name()); @@ -58,22 +97,85 @@ public String getCodeString() { return builder.toString(); } - // should stay in sync with /include/rocksdb/status.h:Code and /java/rocksjni/portal.h:toJavaStatusCode + /** + * Status Code. + *

+ * Should stay in sync with /include/rocksdb/status.h:Code and /java/rocksjni/portal.h:toJavaStatusCode + */ public enum Code { + /** + * Success. + */ Ok( (byte)0x0), + + /** + * Not found. + */ NotFound( (byte)0x1), + + /** + * Corruption detected. + */ Corruption( (byte)0x2), + + /** + * Not supported. + */ NotSupported( (byte)0x3), + + /** + * Invalid argument provided. + */ InvalidArgument( (byte)0x4), + + /** + * I/O error. + */ IOError( (byte)0x5), + + /** + * There is a merge in progress. + */ MergeInProgress( (byte)0x6), + + /** + * Incomplete. + */ Incomplete( (byte)0x7), + + /** + * There is a shutdown in progress. + */ ShutdownInProgress( (byte)0x8), + + /** + * An operation timed out. + */ TimedOut( (byte)0x9), + + /** + * An operation was aborted. + */ Aborted( (byte)0xA), + + /** + * The system is busy. + */ Busy( (byte)0xB), + + /** + * The request expired. + */ Expired( (byte)0xC), + + /** + * The operation should be reattempted. + */ TryAgain( (byte)0xD), + + /** + * Undefined. + */ Undefined( (byte)0x7F); private final byte value; @@ -82,6 +184,15 @@ public enum Code { this.value = value; } + /** + * Get a code from its byte representation. + * + * @param value the byte representation of the code. + * + * @return the code + * + * @throws IllegalArgumentException if the {@code value} parameter does not represent a code. + */ public static Code getCode(final byte value) { for (final Code code : Code.values()) { if (code.value == value){ @@ -102,16 +213,56 @@ public byte getValue() { } } - // should stay in sync with /include/rocksdb/status.h:SubCode and /java/rocksjni/portal.h:toJavaStatusSubCode + /** + * Status Sub-code. + *

+ * should stay in sync with /include/rocksdb/status.h:SubCode and /java/rocksjni/portal.h:toJavaStatusSubCode + */ public enum SubCode { + + /** + * None. + */ None( (byte)0x0), + + /** + * Timeout whilst waiting on Mutex. + */ MutexTimeout( (byte)0x1), + + /** + * Timeout whilst waiting on Lock. + */ LockTimeout( (byte)0x2), + + /** + * Maximum limit on number of locks reached. + */ LockLimit( (byte)0x3), + + /** + * No space remaining. + */ NoSpace( (byte)0x4), + + /** + * Deadlock detected. + */ Deadlock( (byte)0x5), + + /** + * Stale file detected. + */ StaleFile( (byte)0x6), + + /** + * Reached the maximum memory limit. + */ MemoryLimit( (byte)0x7), + + /** + * Undefined. + */ Undefined( (byte)0x7F); private final byte value; @@ -120,6 +271,15 @@ public enum SubCode { this.value = value; } + /** + * Get a sub-code from its byte representation. + * + * @param value the byte representation of the sub-code. + * + * @return the sub-code + * + * @throws IllegalArgumentException if the {@code value} parameter does not represent a sub-code. + */ public static SubCode getSubCode(final byte value) { for (final SubCode subCode : SubCode.values()) { if (subCode.value == value){ diff --git a/java/src/main/java/org/rocksdb/StringAppendOperator.java b/java/src/main/java/org/rocksdb/StringAppendOperator.java index 25b134c44af8..2bf2f1aff6f1 100644 --- a/java/src/main/java/org/rocksdb/StringAppendOperator.java +++ b/java/src/main/java/org/rocksdb/StringAppendOperator.java @@ -11,14 +11,27 @@ * two strings. */ public class StringAppendOperator extends MergeOperator { + /** + * Constructs a StringAppendOperator. + */ public StringAppendOperator() { this(','); } + /** + * Constructs a StringAppendOperator. + * + * @param delim the character delimiter to use when appending. + */ public StringAppendOperator(final char delim) { super(newSharedStringAppendOperator(delim)); } + /** + * Constructs a StringAppendOperator. 
+ * + * @param delim the string delimiter to use when appending. + */ public StringAppendOperator(final String delim) { super(newSharedStringAppendOperator(delim)); } diff --git a/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java b/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java index 8dc56796a25d..aaf34b2cbd57 100644 --- a/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java +++ b/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java @@ -7,6 +7,9 @@ import java.util.Objects; +/** + * Brief information on Table File creation. + */ public class TableFileCreationBriefInfo { private final String dbName; private final String columnFamilyName; diff --git a/java/src/main/java/org/rocksdb/TableFileCreationInfo.java b/java/src/main/java/org/rocksdb/TableFileCreationInfo.java index 5654603c3833..1b65712b3b3b 100644 --- a/java/src/main/java/org/rocksdb/TableFileCreationInfo.java +++ b/java/src/main/java/org/rocksdb/TableFileCreationInfo.java @@ -7,6 +7,9 @@ import java.util.Objects; +/** + * Information on Table File creation. + */ public class TableFileCreationInfo extends TableFileCreationBriefInfo { private final long fileSize; private final TableProperties tableProperties; diff --git a/java/src/main/java/org/rocksdb/TableFileCreationReason.java b/java/src/main/java/org/rocksdb/TableFileCreationReason.java index d3984663dd28..f45da28e5776 100644 --- a/java/src/main/java/org/rocksdb/TableFileCreationReason.java +++ b/java/src/main/java/org/rocksdb/TableFileCreationReason.java @@ -5,10 +5,29 @@ package org.rocksdb; +/** + * Reasons for Table File creation. + */ public enum TableFileCreationReason { + + /** + * Flush. + */ FLUSH((byte) 0x00), + + /** + * Compaction. + */ COMPACTION((byte) 0x01), + + /** + * Recovery. + */ RECOVERY((byte) 0x02), + + /** + * Miscellaneous. 
+ */ MISC((byte) 0x03); private final byte value; diff --git a/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java b/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java index 9a777e3336c2..87bd2b8c87af 100644 --- a/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java +++ b/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java @@ -7,6 +7,9 @@ import java.util.Objects; +/** + * Information on Table File deletion. + */ public class TableFileDeletionInfo { private final String dbName; private final String filePath; diff --git a/java/src/main/java/org/rocksdb/ThreadStatus.java b/java/src/main/java/org/rocksdb/ThreadStatus.java index 4211453d1a0b..c75d85d276f6 100644 --- a/java/src/main/java/org/rocksdb/ThreadStatus.java +++ b/java/src/main/java/org/rocksdb/ThreadStatus.java @@ -7,6 +7,9 @@ import java.util.Map; +/** + * The status of a Thread. + */ public class ThreadStatus { private final long threadId; private final ThreadType threadType; @@ -155,6 +158,13 @@ public static String getOperationName(final OperationType operationType) { return getOperationName(operationType.getValue()); } + /** + * Converts microseconds to a string representation. + * + * @param operationElapsedTime the microseconds. + * + * @return the string representation. + */ public static String microsToString(final long operationElapsedTime) { return microsToStringNative(operationElapsedTime); } diff --git a/java/src/main/java/org/rocksdb/TickerType.java b/java/src/main/java/org/rocksdb/TickerType.java index bf1c73a129fb..70c2def2525b 100644 --- a/java/src/main/java/org/rocksdb/TickerType.java +++ b/java/src/main/java/org/rocksdb/TickerType.java @@ -19,7 +19,7 @@ public enum TickerType { /** * total block cache misses - * + *

* REQUIRES: BLOCK_CACHE_MISS == BLOCK_CACHE_INDEX_MISS + * BLOCK_CACHE_FILTER_MISS + * BLOCK_CACHE_DATA_MISS; @@ -28,7 +28,7 @@ public enum TickerType { /** * total block cache hit - * + *

* REQUIRES: BLOCK_CACHE_HIT == BLOCK_CACHE_INDEX_HIT + * BLOCK_CACHE_FILTER_HIT + * BLOCK_CACHE_DATA_HIT; @@ -38,177 +38,238 @@ public enum TickerType { BLOCK_CACHE_ADD((byte) 0x2), /** - * # of failures when adding blocks to block cache. + * Number of failures when adding blocks to block cache. */ BLOCK_CACHE_ADD_FAILURES((byte) 0x3), /** - * # of times cache miss when accessing index block from block cache. + * Number of times cache miss when accessing index block from block cache. */ BLOCK_CACHE_INDEX_MISS((byte) 0x4), /** - * # of times cache hit when accessing index block from block cache. + * Number of times cache hit when accessing index block from block cache. */ BLOCK_CACHE_INDEX_HIT((byte) 0x5), /** - * # of index blocks added to block cache. + * Number of index blocks added to block cache. */ BLOCK_CACHE_INDEX_ADD((byte) 0x6), /** - * # of bytes of index blocks inserted into cache + * Number of bytes of index blocks inserted into cache */ BLOCK_CACHE_INDEX_BYTES_INSERT((byte) 0x7), /** - * # of times cache miss when accessing filter block from block cache. + * Number of times cache miss when accessing filter block from block cache. */ BLOCK_CACHE_FILTER_MISS((byte) 0x8), /** - * # of times cache hit when accessing filter block from block cache. + * Number of times cache hit when accessing filter block from block cache. */ BLOCK_CACHE_FILTER_HIT((byte) 0x9), /** - * # of filter blocks added to block cache. + * Number of filter blocks added to block cache. */ BLOCK_CACHE_FILTER_ADD((byte) 0xA), /** - * # of bytes of bloom filter blocks inserted into cache + * Number of bytes of bloom filter blocks inserted into cache */ BLOCK_CACHE_FILTER_BYTES_INSERT((byte) 0xB), /** - * # of times cache miss when accessing data block from block cache. + * Number of times cache miss when accessing data block from block cache. */ BLOCK_CACHE_DATA_MISS((byte) 0xC), /** - * # of times cache hit when accessing data block from block cache. 
+ * Number of times cache hit when accessing data block from block cache. */ BLOCK_CACHE_DATA_HIT((byte) 0xD), /** - * # of data blocks added to block cache. + * Number of data blocks added to block cache. */ BLOCK_CACHE_DATA_ADD((byte) 0xE), /** - * # of bytes of data blocks inserted into cache + * Number of bytes of data blocks inserted into cache */ BLOCK_CACHE_DATA_BYTES_INSERT((byte) 0xF), /** - * # of bytes read from cache. + * Number of bytes read from cache. */ BLOCK_CACHE_BYTES_READ((byte) 0x10), /** - * # of bytes written into cache. + * Number of bytes written into cache. */ BLOCK_CACHE_BYTES_WRITE((byte) 0x11), /** - * Block cache related stats for Compression dictionaries + * Number of Block cache Compression dictionary misses. */ BLOCK_CACHE_COMPRESSION_DICT_MISS((byte) 0x12), + + /** + * Number of Block cache Compression dictionary hits. + */ BLOCK_CACHE_COMPRESSION_DICT_HIT((byte) 0x13), + + /** + * Number of Block cache Compression dictionary additions. + */ BLOCK_CACHE_COMPRESSION_DICT_ADD((byte) 0x14), + + /** + * Number of Block cache Compression dictionary bytes inserted. + */ BLOCK_CACHE_COMPRESSION_DICT_BYTES_INSERT((byte) 0x15), /** - * Redundant additions to block cache + * Redundant additions to block cache. */ BLOCK_CACHE_ADD_REDUNDANT((byte) 0x16), + + /** + * Redundant additions to block cache index. + */ BLOCK_CACHE_INDEX_ADD_REDUNDANT((byte) 0x17), + + /** + * Redundant additions to block cache filter. + */ BLOCK_CACHE_FILTER_ADD_REDUNDANT((byte) 0x18), + + /** + * Redundant additions to block cache data. + */ BLOCK_CACHE_DATA_ADD_REDUNDANT((byte) 0x19), + + /** + * Redundant additions to block cache compression dictionary. + */ BLOCK_CACHE_COMPRESSION_DICT_ADD_REDUNDANT((byte) 0x1A), /** - * Number of secondary cache hits + * Number of secondary cache hits. */ SECONDARY_CACHE_HITS((byte) 0x1B), + + /** + * Number of secondary cache filter hits. 
+ */ SECONDARY_CACHE_FILTER_HITS((byte) 0x1C), + + /** + * Number of secondary cache index hits. + */ SECONDARY_CACHE_INDEX_HITS((byte) 0x1D), + + /** + * Number of secondary cache data hits. + */ SECONDARY_CACHE_DATA_HITS((byte) 0x1E), + /** + * Number of compressed secondary cache dummy hits. + */ COMPRESSED_SECONDARY_CACHE_DUMMY_HITS((byte) 0x1F), + + /** + * Number of compressed secondary cache hits. + */ COMPRESSED_SECONDARY_CACHE_HITS((byte) 0x20), + + /** + * Number of compressed secondary cache promotions. + */ COMPRESSED_SECONDARY_CACHE_PROMOTIONS((byte) 0x21), + + /** + * Number of compressed secondary cache promotion skips. + */ COMPRESSED_SECONDARY_CACHE_PROMOTION_SKIPS((byte) 0x22), /** - * # of times bloom filter has avoided file reads. + * Number of times bloom filter has avoided file reads. */ BLOOM_FILTER_USEFUL((byte) 0x23), /** - * # of times bloom FullFilter has not avoided the reads. + * Number of times bloom FullFilter has not avoided the reads. */ BLOOM_FILTER_FULL_POSITIVE((byte) 0x24), /** - * # of times bloom FullFilter has not avoided the reads and data actually + * Number of times bloom FullFilter has not avoided the reads and data actually * exist. */ BLOOM_FILTER_FULL_TRUE_POSITIVE((byte) 0x25), /** - * Number of times bloom was checked before creating iterator on a - * file, and the number of times the check was useful in avoiding - * iterator creation (and thus likely IOPs). + * Number of times bloom was checked before creating iterator on a file. */ BLOOM_FILTER_PREFIX_CHECKED((byte) 0x26), + + /** + * Number of times it was useful (in avoiding iterator creation) that bloom was checked before creating iterator on a file. + */ BLOOM_FILTER_PREFIX_USEFUL((byte) 0x27), + + /** + * Number of times bloom produced a true positive result. 
+ */ BLOOM_FILTER_PREFIX_TRUE_POSITIVE((byte) 0x28), /** - * # persistent cache hit + * Number of persistent cache hit */ PERSISTENT_CACHE_HIT((byte) 0x29), /** - * # persistent cache miss + * Number of persistent cache miss */ PERSISTENT_CACHE_MISS((byte) 0x2A), /** - * # total simulation block cache hits + * Number of total simulation block cache hits */ SIM_BLOCK_CACHE_HIT((byte) 0x2B), /** - * # total simulation block cache misses + * Number of total simulation block cache misses */ SIM_BLOCK_CACHE_MISS((byte) 0x2C), /** - * # of memtable hits. + * Number of memtable hits. */ MEMTABLE_HIT((byte) 0x2D), /** - * # of memtable misses. + * Number of memtable misses. */ MEMTABLE_MISS((byte) 0x2E), /** - * # of Get() queries served by L0 + * Number of Get() queries served by L0 */ GET_HIT_L0((byte) 0x2F), /** - * # of Get() queries served by L1 + * Number of Get() queries served by L1 */ GET_HIT_L1((byte) 0x30), /** - * # of Get() queries served by L2 and up + * Number of Get() queries served by L2 and up */ GET_HIT_L2_AND_UP((byte) 0x31), @@ -340,8 +401,14 @@ public enum TickerType { */ NO_ITERATOR_DELETED((byte) 0x48), + /** + * Number of file opens. + */ NO_FILE_OPENS((byte) 0x49), + /** + * Number of file errors. + */ NO_FILE_ERRORS((byte) 0x4A), /** @@ -376,6 +443,9 @@ public enum TickerType { */ NUMBER_MULTIGET_KEYS_FOUND((byte) 0x50), + /** + * Number of Merge failures. + */ NUMBER_MERGE_FAILURES((byte) 0x51), /** @@ -426,47 +496,129 @@ public enum TickerType { FLUSH_WRITE_BYTES((byte) 0x5A), /** - * Compaction read and write statistics broken down by CompactionReason + * Compaction read bytes marked. */ COMPACT_READ_BYTES_MARKED((byte) 0x5B), + + /** + * Compaction read bytes periodically. + */ COMPACT_READ_BYTES_PERIODIC((byte) 0x5C), + + /** + * Compaction read bytes TTL. + */ COMPACT_READ_BYTES_TTL((byte) 0x5D), + + /** + * Compaction write bytes marked. + */ COMPACT_WRITE_BYTES_MARKED((byte) 0x5E), + + /** + * Compaction write bytes periodically. 
+ */ COMPACT_WRITE_BYTES_PERIODIC((byte) 0x5F), + + /** + * Compaction write bytes TTL. + */ COMPACT_WRITE_BYTES_TTL((byte) 0x60), /** - * Number of table's properties loaded directly from file, without creating - * table reader object. + * Number of table's properties loaded directly from file, without creating table reader object. */ NUMBER_DIRECT_LOAD_TABLE_PROPERTIES((byte) 0x61), + + /** + * Number of superversion acquires. + */ NUMBER_SUPERVERSION_ACQUIRES((byte) 0x62), + + /** + * Number of superversion releases. + */ NUMBER_SUPERVERSION_RELEASES((byte) 0x63), + + /** + * Number of superversion cleanups. + */ NUMBER_SUPERVERSION_CLEANUPS((byte) 0x64), /** - * # of compressions/decompressions executed + * Number of compressions executed. */ NUMBER_BLOCK_COMPRESSED((byte) 0x65), + + /** + * Number of decompressions executed. + */ NUMBER_BLOCK_DECOMPRESSED((byte) 0x66), + /** + * Number of input bytes (uncompressed) to compression for SST blocks that are stored compressed. + */ BYTES_COMPRESSED_FROM((byte) 0x67), + + /** + * Number of output bytes (compressed) from compression for SST blocks that are stored compressed. + */ BYTES_COMPRESSED_TO((byte) 0x68), + + /** + * Number of uncompressed bytes for SST blocks that are stored uncompressed because compression type is kNoCompression, or some error case caused compression not to run or produce an output. Index blocks are only counted if enable_index_compression is true. + */ BYTES_COMPRESSION_BYPASSED((byte) 0x69), + + /** + * Number of input bytes (uncompressed) to compression for SST blocks that are stored uncompressed because the compression result was rejected, either because the ratio was not acceptable (see CompressionOptions::max_compressed_bytes_per_kb) or found invalid by the `verify_compression` option. + */ BYTES_COMPRESSION_REJECTED((byte) 0x6A), + + /** + * Like {@link #BYTES_COMPRESSION_BYPASSED} but counting number of blocks. 
+ */ NUMBER_BLOCK_COMPRESSION_BYPASSED((byte) 0x6B), + + /** + * Like {@link #BYTES_COMPRESSION_REJECTED} but counting number of blocks. + */ NUMBER_BLOCK_COMPRESSION_REJECTED((byte) 0x6C), + + /** + * Number of input bytes (compressed) to decompression in reading compressed SST blocks from storage. + */ BYTES_DECOMPRESSED_FROM((byte) 0x6D), + + /** + * Number of output bytes (uncompressed) from decompression in reading compressed SST blocks from storage. + */ BYTES_DECOMPRESSED_TO((byte) 0x6E), + /** + * Merge operations cumulative time. + */ MERGE_OPERATION_TOTAL_TIME((byte) 0x6F), + + + /** + * Filter operations cumulative time. + */ FILTER_OPERATION_TOTAL_TIME((byte) 0x70), + + /** + * Compaction CPU cumulative time. + */ COMPACTION_CPU_TOTAL_TIME((byte) 0x71), /** - * Row cache. + * Row cache hits. */ ROW_CACHE_HIT((byte) 0x72), + + /** + * Row cache misses. + */ ROW_CACHE_MISS((byte) 0x73), /** @@ -495,97 +647,97 @@ public enum TickerType { /** * BlobDB specific stats - * # of Put/PutTTL/PutUntil to BlobDB. + * Number of Put/PutTTL/PutUntil to BlobDB. */ BLOB_DB_NUM_PUT((byte) 0x77), /** - * # of Write to BlobDB. + * Number of Write to BlobDB. */ BLOB_DB_NUM_WRITE((byte) 0x78), /** - * # of Get to BlobDB. + * Number of Get to BlobDB. */ BLOB_DB_NUM_GET((byte) 0x79), /** - * # of MultiGet to BlobDB. + * Number of MultiGet to BlobDB. */ BLOB_DB_NUM_MULTIGET((byte) 0x7A), /** - * # of Seek/SeekToFirst/SeekToLast/SeekForPrev to BlobDB iterator. + * Number of Seek/SeekToFirst/SeekToLast/SeekForPrev to BlobDB iterator. */ BLOB_DB_NUM_SEEK((byte) 0x7B), /** - * # of Next to BlobDB iterator. + * Number of Next to BlobDB iterator. */ BLOB_DB_NUM_NEXT((byte) 0x7C), /** - * # of Prev to BlobDB iterator. + * Number of Prev to BlobDB iterator. */ BLOB_DB_NUM_PREV((byte) 0x7D), /** - * # of keys written to BlobDB. + * Number of keys written to BlobDB. */ BLOB_DB_NUM_KEYS_WRITTEN((byte) 0x7E), /** - * # of keys read from BlobDB. + * Number of keys read from BlobDB. 
*/ BLOB_DB_NUM_KEYS_READ((byte) 0x7F), /** - * # of bytes (key + value) written to BlobDB. + * Number of bytes (key + value) written to BlobDB. */ BLOB_DB_BYTES_WRITTEN((byte) -0x1), /** - * # of bytes (keys + value) read from BlobDB. + * Number of bytes (keys + value) read from BlobDB. */ BLOB_DB_BYTES_READ((byte) -0x2), /** - * # of keys written by BlobDB as non-TTL inlined value. + * Number of keys written by BlobDB as non-TTL inlined value. */ BLOB_DB_WRITE_INLINED((byte) -0x3), /** - * # of keys written by BlobDB as TTL inlined value. + * Number of keys written by BlobDB as TTL inlined value. */ BLOB_DB_WRITE_INLINED_TTL((byte) -0x4), /** - * # of keys written by BlobDB as non-TTL blob value. + * Number of keys written by BlobDB as non-TTL blob value. */ BLOB_DB_WRITE_BLOB((byte) -0x5), /** - * # of keys written by BlobDB as TTL blob value. + * Number of keys written by BlobDB as TTL blob value. */ BLOB_DB_WRITE_BLOB_TTL((byte) -0x6), /** - * # of bytes written to blob file. + * Number of bytes written to blob file. */ BLOB_DB_BLOB_FILE_BYTES_WRITTEN((byte) -0x7), /** - * # of bytes read from blob file. + * Number of bytes read from blob file. */ BLOB_DB_BLOB_FILE_BYTES_READ((byte) -0x8), /** - * # of times a blob files being synced. + * Number of times a blob files being synced. */ BLOB_DB_BLOB_FILE_SYNCED((byte) -0x9), /** - * # of blob index evicted from base DB by BlobDB compaction filter because + * Number of blob index evicted from base DB by BlobDB compaction filter because * of expiration. */ BLOB_DB_BLOB_INDEX_EXPIRED_COUNT((byte) -0xA), @@ -597,7 +749,7 @@ public enum TickerType { BLOB_DB_BLOB_INDEX_EXPIRED_SIZE((byte) -0xB), /** - * # of blob index evicted from base DB by BlobDB compaction filter because + * Number of blob index evicted from base DB by BlobDB compaction filter because * of corresponding file deleted. 
*/ BLOB_DB_BLOB_INDEX_EVICTED_COUNT((byte) -0xC), @@ -609,114 +761,114 @@ public enum TickerType { BLOB_DB_BLOB_INDEX_EVICTED_SIZE((byte) -0xD), /** - * # of blob files being garbage collected. + * Number of blob files being garbage collected. */ BLOB_DB_GC_NUM_FILES((byte) -0xE), /** - * # of blob files generated by garbage collection. + * Number of blob files generated by garbage collection. */ BLOB_DB_GC_NUM_NEW_FILES((byte) -0xF), /** - * # of BlobDB garbage collection failures. + * Number of BlobDB garbage collection failures. */ BLOB_DB_GC_FAILURES((byte) -0x10), /** - * # of keys relocated to new blob file by garbage collection. + * Number of keys relocated to new blob file by garbage collection. */ BLOB_DB_GC_NUM_KEYS_RELOCATED((byte) -0x11), /** - * # of bytes relocated to new blob file by garbage collection. + * Number of bytes relocated to new blob file by garbage collection. */ BLOB_DB_GC_BYTES_RELOCATED((byte) -0x12), /** - * # of blob files evicted because of BlobDB is full. + * Number of blob files evicted because of BlobDB is full. */ BLOB_DB_FIFO_NUM_FILES_EVICTED((byte) -0x13), /** - * # of keys in the blob files evicted because of BlobDB is full. + * Number of keys in the blob files evicted because of BlobDB is full. */ BLOB_DB_FIFO_NUM_KEYS_EVICTED((byte) -0x14), /** - * # of bytes in the blob files evicted because of BlobDB is full. + * Number of bytes in the blob files evicted because of BlobDB is full. */ BLOB_DB_FIFO_BYTES_EVICTED((byte) -0x15), /** - * # of times cache miss when accessing blob from blob cache. + * Number of times cache miss when accessing blob from blob cache. */ BLOB_DB_CACHE_MISS((byte) -0x16), /** - * # of times cache hit when accessing blob from blob cache. + * Number of times cache hit when accessing blob from blob cache. */ BLOB_DB_CACHE_HIT((byte) -0x17), /** - * # of data blocks added to blob cache. + * Number of data blocks added to blob cache. 
*/ BLOB_DB_CACHE_ADD((byte) -0x18), /** - * # # of failures when adding blobs to blob cache. + * Number of failures when adding blobs to blob cache. */ BLOB_DB_CACHE_ADD_FAILURES((byte) -0x19), /** - * # of bytes read from blob cache. + * Number of bytes read from blob cache. */ BLOB_DB_CACHE_BYTES_READ((byte) -0x1A), /** - * # of bytes written into blob cache. + * Number of bytes written into blob cache. */ BLOB_DB_CACHE_BYTES_WRITE((byte) -0x1B), /** * These counters indicate a performance issue in WritePrepared transactions. * We should not seem them ticking them much. - * # of times prepare_mutex_ is acquired in the fast path. + * Number of times prepare_mutex_ is acquired in the fast path. */ TXN_PREPARE_MUTEX_OVERHEAD((byte) -0x1C), /** - * # of times old_commit_map_mutex_ is acquired in the fast path. + * Number of times old_commit_map_mutex_ is acquired in the fast path. */ TXN_OLD_COMMIT_MAP_MUTEX_OVERHEAD((byte) -0x1D), /** - * # of times we checked a batch for duplicate keys. + * Number of times we checked a batch for duplicate keys. */ TXN_DUPLICATE_KEY_OVERHEAD((byte) -0x1E), /** - * # of times snapshot_mutex_ is acquired in the fast path. + * Number of times snapshot_mutex_ is acquired in the fast path. 
 */ TXN_SNAPSHOT_MUTEX_OVERHEAD((byte) -0x1F), /** - * # of times ::Get returned TryAgain due to expired snapshot seq + * Number of times ::Get returned TryAgain due to expired snapshot seq */ TXN_GET_TRY_AGAIN((byte) -0x20), /** - * # of files marked as trash by delete scheduler + * Number of files marked as trash by delete scheduler */ FILES_MARKED_TRASH((byte) -0x21), /** - * # of trash files deleted by the background thread from the trash queue + * Number of trash files deleted by the background thread from the trash queue */ FILES_DELETED_FROM_TRASH_QUEUE((byte) -0x22), /** - * # of files deleted immediately by delete scheduler + * Number of files deleted immediately by delete scheduler */ FILES_DELETED_IMMEDIATELY((byte) -0x23), @@ -724,10 +876,31 @@ public enum TickerType { * DB error handler statistics */ ERROR_HANDLER_BG_ERROR_COUNT((byte) -0x24), + + /** + * Number of background I/O errors handled by the error handler. + */ ERROR_HANDLER_BG_IO_ERROR_COUNT((byte) -0x25), + + /** + * Number of retryable background I/O errors handled by the error handler. + * This is a subset of {@link #ERROR_HANDLER_BG_IO_ERROR_COUNT}. + */ ERROR_HANDLER_BG_RETRYABLE_IO_ERROR_COUNT((byte) -0x26), + + /** + * Number of auto resumes handled by the error handler. + */ ERROR_HANDLER_AUTORESUME_COUNT((byte) -0x27), + + /** + * Total number of auto resume retries handled by the error handler. + */ ERROR_HANDLER_AUTORESUME_RETRY_TOTAL_COUNT((byte) -0x28), + + /** + * Number of auto resumes that succeeded that were handled by the error handler. + */ ERROR_HANDLER_AUTORESUME_SUCCESS_COUNT((byte) -0x29), /** @@ -737,6 +910,7 @@ public enum TickerType { * eventually be written to SSTable). */ MEMTABLE_PAYLOAD_BYTES_AT_FLUSH((byte) -0x2A), + /** * Outdated bytes of data present on memtable at flush time. */ @@ -748,37 +922,93 @@ public enum TickerType { VERIFY_CHECKSUM_READ_BYTES((byte) -0x2C), /** - * Bytes read/written while creating backups + * Bytes read whilst creating backups. 
 */ BACKUP_READ_BYTES((byte) -0x2D), + + /** + * Bytes written whilst creating backups. + */ BACKUP_WRITE_BYTES((byte) -0x2E), /** - * Remote compaction read/write statistics + * Remote compaction bytes read. */ REMOTE_COMPACT_READ_BYTES((byte) -0x2F), + + /** + * Remote compaction bytes written. + */ REMOTE_COMPACT_WRITE_BYTES((byte) -0x30), /** - * Tiered storage related statistics + * Bytes read from hot files. */ HOT_FILE_READ_BYTES((byte) -0x31), + + /** + * Bytes read from warm files. + */ WARM_FILE_READ_BYTES((byte) -0x32), + + /** + * Bytes read from cool files. + */ COOL_FILE_READ_BYTES((byte) -0x5B), + + /** + * Bytes read from cold files. + */ COLD_FILE_READ_BYTES((byte) -0x33), + + /** + * Bytes read from ice cold files. + */ ICE_FILE_READ_BYTES((byte) -0x59), + + /** + * Number of reads from hot files. + */ HOT_FILE_READ_COUNT((byte) -0x34), + + /** + * Number of reads from warm files. + */ WARM_FILE_READ_COUNT((byte) -0x35), + + /** + * Number of reads from cool files. + */ COOL_FILE_READ_COUNT((byte) -0x5C), + + /** + * Number of reads from cold files. + */ COLD_FILE_READ_COUNT((byte) -0x36), + + /** + * Number of reads from ice cold files. + */ ICE_FILE_READ_COUNT((byte) -0x5A), /** - * (non-)last level read statistics + * Bytes read from the last level. */ LAST_LEVEL_READ_BYTES((byte) -0x37), + + /** + * Number of reads from the last level. + */ LAST_LEVEL_READ_COUNT((byte) -0x38), + + /** + * Bytes read from the non-last level. + */ NON_LAST_LEVEL_READ_BYTES((byte) -0x39), + + /** + * Number of reads from the non-last level. + */ NON_LAST_LEVEL_READ_COUNT((byte) -0x3A), /** @@ -789,22 +1019,26 @@ public enum TickerType { * not find anything relevant, so avoided a likely access to data+index * blocks. */ + LAST_LEVEL_SEEK_FILTERED((byte) -0x3B), /** * Filter match: a filter such as prefix Bloom filter was queried but did * not filter out the seek. 
 */ LAST_LEVEL_SEEK_FILTER_MATCH((byte) -0x3C), + /** * At least one data block was accessed for a Seek() (or variant) on a * sorted run. */ LAST_LEVEL_SEEK_DATA((byte) -0x3D), + /** * At least one value() was accessed for the seek (suggesting it was useful), * and no filter such as prefix Bloom was queried. */ LAST_LEVEL_SEEK_DATA_USEFUL_NO_FILTER((byte) -0x3E), + /** * At least one value() was accessed for the seek (suggesting it was useful), * after querying a filter such as prefix Bloom. @@ -812,16 +1046,32 @@ public enum TickerType { LAST_LEVEL_SEEK_DATA_USEFUL_FILTER_MATCH((byte) -0x3F), /** - * The same set of stats, but for non-last level seeks. + * Similar to {@link #LAST_LEVEL_SEEK_FILTERED} but for the non-last level. */ NON_LAST_LEVEL_SEEK_FILTERED((byte) -0x40), + + /** + * Similar to {@link #LAST_LEVEL_SEEK_FILTER_MATCH} but for the non-last level. + */ NON_LAST_LEVEL_SEEK_FILTER_MATCH((byte) -0x41), + + /** + * Similar to {@link #LAST_LEVEL_SEEK_DATA} but for the non-last level. + */ NON_LAST_LEVEL_SEEK_DATA((byte) -0x42), + + /** + * Similar to {@link #LAST_LEVEL_SEEK_DATA_USEFUL_NO_FILTER} but for the non-last level. + */ NON_LAST_LEVEL_SEEK_DATA_USEFUL_NO_FILTER((byte) -0x43), + + /** + * Similar to {@link #LAST_LEVEL_SEEK_DATA_USEFUL_FILTER_MATCH} but for the non-last level. + */ NON_LAST_LEVEL_SEEK_DATA_USEFUL_FILTER_MATCH((byte) -0x44), /** - * Number of block checksum verifications + * Number of block checksum verifications. */ BLOCK_CHECKSUM_COMPUTE_COUNT((byte) -0x45), @@ -832,15 +1082,18 @@ public enum TickerType { */ BLOCK_CHECKSUM_MISMATCH_COUNT((byte) -0x46), + /** + * Number of MultiGet coroutines. + */ MULTIGET_COROUTINE_COUNT((byte) -0x47), /** - * Time spent in the ReadAsync file system call + * Time spent in the ReadAsync file system call. */ READ_ASYNC_MICROS((byte) -0x48), /** - * Number of errors returned to the async read callback + * Number of errors returned to the async read callback. 
 */ ASYNC_READ_ERROR_COUNT((byte) -0x49), @@ -859,33 +1112,63 @@ public enum TickerType { TABLE_OPEN_PREFETCH_TAIL_HIT((byte) -0x4B), /** - * # of times timestamps are checked on accessing the table + * Number of times timestamps are checked on accessing the table */ TIMESTAMP_FILTER_TABLE_CHECKED((byte) -0x4C), /** - * # of times timestamps can successfully help skip the table access + * Number of times timestamps can successfully help skip the table access */ TIMESTAMP_FILTER_TABLE_FILTERED((byte) -0x4D), + /** + * Number of times readahead is trimmed during scans when ReadOptions.auto_readahead_size is set. + */ READAHEAD_TRIMMED((byte) -0x4E), + /** + * Number of FIFO compactions due to max size. + */ FIFO_MAX_SIZE_COMPACTIONS((byte) -0x4F), + /** + * Number of FIFO compactions due to TTL. + */ FIFO_TTL_COMPACTIONS((byte) -0x50), + /** + * Number of FIFO compactions that change temperature. + */ FIFO_CHANGE_TEMPERATURE_COMPACTIONS((byte) -0x58), + /** + * Number of bytes prefetched during user initiated scan. + */ PREFETCH_BYTES((byte) -0x51), + /** + * Number of prefetched bytes that were actually useful during user initiated scan. + */ PREFETCH_BYTES_USEFUL((byte) -0x52), + /** + * Number of FS reads avoided due to prefetching during user initiated scan. + */ PREFETCH_HITS((byte) -0x53), + /** + * Footer corruption detected when opening an SST file for reading. + */ SST_FOOTER_CORRUPTION_COUNT((byte) -0x55), + /** + * Counter for file read retries with the verify_and_reconstruct_read file system option after detecting a checksum mismatch. + */ FILE_READ_CORRUPTION_RETRY_COUNT((byte) -0x56), + /** + * Counter for successful file read retries with the verify_and_reconstruct_read file system option after detecting a checksum mismatch. + */ FILE_READ_CORRUPTION_RETRY_SUCCESS_COUNT((byte) -0x57), /** @@ -897,12 +1180,12 @@ public enum TickerType { NUMBER_WBWI_INGEST((byte) -0x5D), /** - * Failure to load the UDI during SST table open + * Failure to load the UDI during SST table open. 
*/ SST_USER_DEFINED_INDEX_LOAD_FAIL_COUNT((byte) -0x5E), /** - * Bytes of output files successfully resumed during remote compaction + * Bytes of output files successfully resumed during remote compaction. */ REMOTE_COMPACT_RESUMED_BYTES((byte) -0x5F), @@ -911,50 +1194,53 @@ public enum TickerType { */ /** - * # of calls to Iterator::Prepare() for multi-scan + * Number of calls to Iterator::Prepare() for multi-scan. */ MULTISCAN_PREPARE_CALLS((byte) -0x60), /** - * # of errors during Iterator::Prepare() for multi-scan + * Number of errors during Iterator::Prepare() for multi-scan. */ MULTISCAN_PREPARE_ERRORS((byte) -0x61), /** - * # of data blocks prefetched during multi-scan Prepare() + * Number of data blocks prefetched during multi-scan Prepare(). */ MULTISCAN_BLOCKS_PREFETCHED((byte) -0x62), /** - * # of data blocks found in cache during multi-scan Prepare() + * Number of data blocks found in cache during multi-scan Prepare(). */ MULTISCAN_BLOCKS_FROM_CACHE((byte) -0x63), /** - * Total bytes prefetched during multi-scan Prepare() + * Total bytes prefetched during multi-scan Prepare(). */ MULTISCAN_PREFETCH_BYTES((byte) -0x64), /** - * # of prefetched blocks that were never accessed (wasted) + * Number of prefetched blocks that were never accessed (wasted). */ MULTISCAN_PREFETCH_BLOCKS_WASTED((byte) -0x65), /** - * # of I/O requests issued during multi-scan Prepare() + * Number of I/O requests issued during multi-scan Prepare(). */ MULTISCAN_IO_REQUESTS((byte) -0x66), /** - * # of non-adjacent blocks coalesced into single I/O request + * Number of non-adjacent blocks coalesced into single I/O request. */ MULTISCAN_IO_COALESCED_NONADJACENT((byte) -0x67), /** - * # of seek errors during multi-scan iteration + * Number of seek errors during multi-scan iteration. */ MULTISCAN_SEEK_ERRORS((byte) -0x68), + /** + * Maximum number of ticker types. 
+ */ TICKER_ENUM_MAX((byte) -0x54); private final byte value; diff --git a/java/src/main/java/org/rocksdb/TraceOptions.java b/java/src/main/java/org/rocksdb/TraceOptions.java index cf5f7bbe12f8..85c8abcc2500 100644 --- a/java/src/main/java/org/rocksdb/TraceOptions.java +++ b/java/src/main/java/org/rocksdb/TraceOptions.java @@ -12,10 +12,18 @@ public class TraceOptions { private final long maxTraceFileSize; + /** + * Constructs a TraceOptions. + */ public TraceOptions() { this.maxTraceFileSize = 64L * 1024L * 1024L * 1024L; // 64 GB } + /** + * Constructs a TraceOptions. + * + * @param maxTraceFileSize the maximum size of the trace file. + */ public TraceOptions(final long maxTraceFileSize) { this.maxTraceFileSize = maxTraceFileSize; } diff --git a/java/src/main/java/org/rocksdb/Transaction.java b/java/src/main/java/org/rocksdb/Transaction.java index ee8656460835..5db1384c7740 100644 --- a/java/src/main/java/org/rocksdb/Transaction.java +++ b/java/src/main/java/org/rocksdb/Transaction.java @@ -184,7 +184,9 @@ public void clearSnapshot() { } /** - * Prepare the current transaction for 2PC + * Prepare the current transaction for 2PC. + * + * @throws RocksDBException if the transaction cannot be prepared */ public void prepare() throws RocksDBException { //TODO(AR) consider a Java'ish version of this function, which returns an AutoCloseable (commit) @@ -257,7 +259,7 @@ public void rollbackToSavePoint() throws RocksDBException { /** * This function has an inconsistent parameter order compared to other {@code get()} * methods and is deprecated in favour of one with a consistent order. - * + *

* This function is similar to * {@link RocksDB#get(ColumnFamilyHandle, ReadOptions, byte[])} except it will * also read pending changes in this transaction. @@ -297,11 +299,11 @@ public byte[] get(final ColumnFamilyHandle columnFamilyHandle, final ReadOptions * also read pending changes in this transaction. * Currently, this function will return Status::MergeInProgress if the most * recent write to the queried key in this batch is a Merge. - * + *

* If {@link ReadOptions#snapshot()} is not set, the current version of the * key will be read. Calling {@link #setSnapshot()} does not affect the * version of the data returned. - * + *

* Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect * what is read from the DB but will NOT change which keys are read from this * transaction (the keys in this transaction do not yet belong to any snapshot @@ -560,7 +562,7 @@ public byte[][] multiGet(final ReadOptions readOptions, * {@link org.rocksdb.ColumnFamilyHandle} instances. * @param keys of keys for which values need to be retrieved. * - * @return Array of values, one for each key + * @return list of values, one for each key * * @throws RocksDBException thrown if error happens in underlying * native library. @@ -646,7 +648,7 @@ public byte[][] multiGet(final ReadOptions readOptions, final byte[][] keys) * {@link org.rocksdb.ColumnFamilyHandle} instances. * @param keys of keys for which values need to be retrieved. * - * @return Array of values, one for each key + * @return list of values, one for each key * * @throws RocksDBException thrown if error happens in underlying * native library. @@ -1189,7 +1191,6 @@ public GetStatus getForUpdate(final ReadOptions readOptions, /** * A multi-key version of * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}. - *

* * @param readOptions Read options. * @param columnFamilyHandles {@link org.rocksdb.ColumnFamilyHandle} @@ -1225,14 +1226,13 @@ public byte[][] multiGetForUpdate(final ReadOptions readOptions, /** * A multi-key version of * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}. - *

* * @param readOptions Read options. * @param columnFamilyHandles {@link org.rocksdb.ColumnFamilyHandle} * instances * @param keys the keys to retrieve the values for. * - * @return Array of values, one for each key + * @return list of values, one for each key * * @throws RocksDBException thrown if error happens in underlying * native library. @@ -1261,7 +1261,6 @@ public List multiGetForUpdateAsList(final ReadOptions readOptions, /** * A multi-key version of {@link #getForUpdate(ReadOptions, byte[], boolean)}. - *

* * @param readOptions Read options. * @param keys the keys to retrieve the values for. @@ -1285,7 +1284,6 @@ public byte[][] multiGetForUpdate(final ReadOptions readOptions, final byte[][] /** * A multi-key version of {@link #getForUpdate(ReadOptions, byte[], boolean)}. - *

* * @param readOptions Read options. * @param keys the keys to retrieve the values for. @@ -1332,7 +1330,7 @@ public RocksIterator getIterator() { * Returns an iterator that will iterate on all keys in the default * column family including both keys in the DB and uncommitted keys in this * transaction. - * + *

* Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is read * from the DB but will NOT change which keys are read from this transaction * (the keys in this transaction do not yet belong to any snapshot and will be @@ -1555,10 +1553,10 @@ public void put(final ColumnFamilyHandle columnFamilyHandle, /** * Similar to {@link RocksDB#put(byte[], byte[])}, but * will also perform conflict checking on the keys be written. - * + *

* If this Transaction was created on an {@link OptimisticTransactionDB}, * these functions should always succeed. - * + *

* If this Transaction was created on a {@link TransactionDB}, an * {@link RocksDBException} may be thrown with an accompanying {@link Status} * when: @@ -1593,10 +1591,10 @@ public void put(final ByteBuffer key, final ByteBuffer value) throws RocksDBExce /** * Similar to {@link RocksDB#put(byte[], byte[])}, but * will also perform conflict checking on the keys be written. - * + *

* If this Transaction was created on an {@link OptimisticTransactionDB}, * these functions should always succeed. - * + *

* If this Transaction was created on a {@link TransactionDB}, an * {@link RocksDBException} may be thrown with an accompanying {@link Status} * when: @@ -1635,6 +1633,30 @@ public void put(final ColumnFamilyHandle columnFamilyHandle, final ByteBuffer ke key.position(key.limit()); value.position(value.limit()); } + + /** + * Similar to {@link RocksDB#put(byte[], byte[])}, but + * will also perform conflict checking on the keys be written. + *

+ * If this Transaction was created on an {@link OptimisticTransactionDB}, + * these functions should always succeed. + *

+ * If this Transaction was created on a {@link TransactionDB}, an + * {@link RocksDBException} may be thrown with an accompanying {@link Status} + * when: + * {@link Status.Code#Busy} if there is a write conflict, + * {@link Status.Code#TimedOut} if a lock could not be acquired, + * {@link Status.Code#TryAgain} if the memtable history size is not large + * enough. See + * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()} + * + * @param columnFamilyHandle The column family to put the key/value into + * @param key the specified key to be inserted. + * @param value the value associated with the specified key. + * + * @throws RocksDBException when one of the TransactionalDB conditions + * described above occurs, or in the case of an unexpected error + */ public void put(final ColumnFamilyHandle columnFamilyHandle, final ByteBuffer key, final ByteBuffer value) throws RocksDBException { put(columnFamilyHandle, key, value, false); @@ -1755,10 +1777,10 @@ public void merge(final byte[] key, final byte[] value) /** * Similar to {@link RocksDB#merge(byte[], byte[])}, but * will also perform conflict checking on the keys be written. - * + *

* If this Transaction was created on an {@link OptimisticTransactionDB}, * these functions should always succeed. - * + *

* If this Transaction was created on a {@link TransactionDB}, an * {@link RocksDBException} may be thrown with an accompanying {@link Status} * when: @@ -1791,10 +1813,10 @@ public void merge(final ByteBuffer key, final ByteBuffer value) throws RocksDBEx /** * Similar to {@link RocksDB#merge(byte[], byte[])}, but * will also perform conflict checking on the keys be written. - * + *

* If this Transaction was created on an {@link OptimisticTransactionDB}, * these functions should always succeed. - * + *

* If this Transaction was created on a {@link TransactionDB}, an * {@link RocksDBException} may be thrown with an accompanying {@link Status} * when: @@ -1833,10 +1855,10 @@ public void merge(final ColumnFamilyHandle columnFamilyHandle, final ByteBuffer /** * Similar to {@link RocksDB#merge(byte[], byte[])}, but * will also perform conflict checking on the keys be written. - * + *

* If this Transaction was created on an {@link OptimisticTransactionDB}, * these functions should always succeed. - * + *

* If this Transaction was created on a {@link TransactionDB}, an * {@link RocksDBException} may be thrown with an accompanying {@link Status} * when: @@ -2283,10 +2305,10 @@ public void mergeUntracked(final ColumnFamilyHandle columnFamilyHandle, * Similar to {@link RocksDB#merge(ColumnFamilyHandle, byte[], byte[])}, * but operates on the transactions write batch. This write will only happen * if this transaction gets committed successfully. - * + *

* Unlike {@link #merge(ColumnFamilyHandle, byte[], byte[])} no conflict * checking will be performed for this key. - * + *

* If this Transaction was created on a {@link TransactionDB}, this function * will still acquire locks necessary to make sure this write doesn't cause * conflicts in other transactions; This may cause a {@link RocksDBException} @@ -2346,10 +2368,10 @@ public void mergeUntracked(final byte[] key, final byte[] value) * Similar to {@link RocksDB#merge(byte[], byte[])}, * but operates on the transactions write batch. This write will only happen * if this transaction gets committed successfully. - * + *

* Unlike {@link #merge(byte[], byte[])} no conflict * checking will be performed for this key. - * + *

* If this Transaction was created on a {@link TransactionDB}, this function * will still acquire locks necessary to make sure this write doesn't cause * conflicts in other transactions; This may cause a {@link RocksDBException} @@ -2792,20 +2814,57 @@ public long getId() { return getId(nativeHandle_); } + /** + * States of a Transaction. + */ public enum TransactionState { + /** + * Transaction started. + */ STARTED((byte)0), + + /** + * Transaction is awaiting prepare. + */ AWAITING_PREPARE((byte)1), + + /** + * Transaction is prepared. + */ PREPARED((byte)2), + + /** + * Transaction awaiting commit. + */ AWAITING_COMMIT((byte)3), + + /** + * Transaction is committed. + */ COMMITTED((byte)4), + + /** + * Transaction is awaiting rollback. + */ AWAITING_ROLLBACK((byte)5), + + /** + * Transaction rolled-back. + */ ROLLEDBACK((byte)6), + + /** + * Transaction locks have been stolen. + */ LOCKS_STOLEN((byte)7); - /* - * Keep old misspelled variable as alias - * Tip from https://stackoverflow.com/a/37092410/454544 + /** + * Old misspelled variable as alias for {@link #COMMITTED}. + * Tip from https://stackoverflow.com/a/37092410/454544 + * + * @deprecated use {@link #COMMITTED} instead. */ + @Deprecated public static final TransactionState COMMITED = COMMITTED; private final byte value; @@ -2850,6 +2909,9 @@ private WaitingTransactions newWaitingTransactions( return new WaitingTransactions(columnFamilyId, key, transactionIds); } + /** + * Waiting Transactions. 
+ */ public static class WaitingTransactions { private final long columnFamilyId; private final String key; diff --git a/java/src/main/java/org/rocksdb/TransactionDB.java b/java/src/main/java/org/rocksdb/TransactionDB.java index 0f75e5f97019..cff970f6eef9 100644 --- a/java/src/main/java/org/rocksdb/TransactionDB.java +++ b/java/src/main/java/org/rocksdb/TransactionDB.java @@ -218,6 +218,14 @@ public Transaction beginTransaction(final WriteOptions writeOptions, return oldTransaction; } + /** + * Gets a transaction by name. + * + * @param transactionName the name of the transaction. + * + * @return the transaction, or null if the transaction can't be found. + * + */ public Transaction getTransactionByName(final String transactionName) { final long jtxnHandle = getTransactionByName(nativeHandle_, transactionName); if(jtxnHandle == 0) { @@ -232,6 +240,11 @@ public Transaction getTransactionByName(final String transactionName) { return txn; } + /** + * Gets a list of all prepared transactions. + * + * @return the list of prepared transactions. + */ public List getAllPreparedTransactions() { final long[] jtxnHandles = getAllPreparedTransactions(nativeHandle_); @@ -247,11 +260,21 @@ public List getAllPreparedTransactions() { return txns; } + /** + * Information on Key Locks. + */ public static class KeyLockInfo { private final String key; private final long[] transactionIDs; private final boolean exclusive; + /** + * Constructs a KeyLockInfo. + * + * @param key the key. + * @param transactionIDs the transaction ids + * @param exclusive true if the lock is exclusive, false if the lock is shared. + */ @SuppressWarnings("PMD.ArrayIsStoredDirectly") public KeyLockInfo(final String key, final long[] transactionIDs, final boolean exclusive) { this.key = key; @@ -315,6 +338,9 @@ private DeadlockInfo newDeadlockInfo(final long transactionID, final long column waitingKey, exclusive); } + /** + * Information on a Deadlock. 
+ */ public static class DeadlockInfo { private final long transactionID; private final long columnFamilyId; @@ -366,25 +392,49 @@ public boolean isExclusive() { } } + /** + * The paths of a Deadlock. + */ public static class DeadlockPath { final DeadlockInfo[] path; final boolean limitExceeded; + /** + * Construct a DeadLockPack. + * + * @param path the paths + * @param limitExceeded true if the limit is exceeded, false otherwise. + */ @SuppressWarnings("PMD.ArrayIsStoredDirectly") public DeadlockPath(final DeadlockInfo[] path, final boolean limitExceeded) { this.path = path; this.limitExceeded = limitExceeded; } + /** + * Returns true if there are no paths and the limit is not exceeded. + * + * @return true if empty, false otherwise. + */ public boolean isEmpty() { return path.length == 0 && !limitExceeded; } } + /** + * Get Deadlock Information. + * + * @return the deadlock paths. + */ public DeadlockPath[] getDeadlockInfoBuffer() { return getDeadlockInfoBuffer(nativeHandle_); } + /** + * Set the size of the deadlock information buffer. + * + * @param targetSize the target size of the buffer. + */ public void setDeadlockInfoBufferSize(final int targetSize) { setDeadlockInfoBufferSize(nativeHandle_, targetSize); } diff --git a/java/src/main/java/org/rocksdb/TransactionDBOptions.java b/java/src/main/java/org/rocksdb/TransactionDBOptions.java index 8257d50f7a60..abc135ca4b78 100644 --- a/java/src/main/java/org/rocksdb/TransactionDBOptions.java +++ b/java/src/main/java/org/rocksdb/TransactionDBOptions.java @@ -5,8 +5,14 @@ package org.rocksdb; +/** + * Options for TransactionDB. + */ public class TransactionDBOptions extends RocksObject { + /** + * Constructs a TransactionDB. 
+ */ public TransactionDBOptions() { super(newTransactionDBOptions()); } @@ -110,16 +116,15 @@ public TransactionDBOptions setTransactionLockTimeout( /** * The wait timeout in milliseconds when writing a key - * OUTSIDE of a transaction (ie by calling {@link RocksDB#put}, - * {@link RocksDB#merge}, {@link RocksDB#delete} or {@link RocksDB#write} + * OUTSIDE of a transaction (ie by calling {@link RocksDB#put(byte[], byte[])}, + * {@link RocksDB#merge(byte[], byte[])}, {@link RocksDB#delete(WriteOptions, byte[])} or {@link RocksDB#write(WriteOptions, WriteBatch)} * directly). *

* If 0, no waiting is done if a lock cannot instantly be acquired. * If negative, there is no timeout and will block indefinitely when acquiring * a lock. * - * @return the timeout in milliseconds when writing a key OUTSIDE of a - * transaction + * @return the timeout in milliseconds when writing a key outside of the transaction */ public long getDefaultLockTimeout() { assert(isOwningHandle()); @@ -128,8 +133,8 @@ public long getDefaultLockTimeout() { /** * If positive, specifies the wait timeout in milliseconds when writing a key - * OUTSIDE of a transaction (ie by calling {@link RocksDB#put}, - * {@link RocksDB#merge}, {@link RocksDB#delete} or {@link RocksDB#write} + * OUTSIDE of a transaction (ie by calling {@link RocksDB#put(byte[], byte[])}, + * {@link RocksDB#merge(byte[], byte[])}, {@link RocksDB#delete(byte[])} or {@link RocksDB#write(WriteOptions, WriteBatch)} * directly). *

* If 0, no waiting is done if a lock cannot instantly be acquired. @@ -145,7 +150,7 @@ public long getDefaultLockTimeout() { * Default: 1000 * * @param defaultLockTimeout the timeout in milliseconds when writing a key - * OUTSIDE of a transaction + * outside of the transaction * @return this TransactionDBOptions instance */ public TransactionDBOptions setDefaultLockTimeout(final long defaultLockTimeout) { diff --git a/java/src/main/java/org/rocksdb/TransactionOptions.java b/java/src/main/java/org/rocksdb/TransactionOptions.java index d2efeb87ce4a..43a482b240b4 100644 --- a/java/src/main/java/org/rocksdb/TransactionOptions.java +++ b/java/src/main/java/org/rocksdb/TransactionOptions.java @@ -5,9 +5,15 @@ package org.rocksdb; +/** + * Options for a Transaction. + */ public class TransactionOptions extends RocksObject implements TransactionalOptions { + /** + * Constructs a TransactionOptions. + */ public TransactionOptions() { super(newTransactionOptions()); } @@ -56,7 +62,7 @@ public TransactionOptions setDeadlockDetect(final boolean deadlockDetect) { * The wait timeout in milliseconds when a transaction attempts to lock a key. *

* If 0, no waiting is done if a lock cannot instantly be acquired. - * If negative, {@link TransactionDBOptions#getTransactionLockTimeout(long)} + * If negative, {@link TransactionDBOptions#getTransactionLockTimeout()} * will be used * * @return the lock timeout in milliseconds @@ -71,7 +77,7 @@ public long getLockTimeout() { * a transaction attempts to lock a key. *

* If 0, no waiting is done if a lock cannot instantly be acquired. - * If negative, {@link TransactionDBOptions#getTransactionLockTimeout(long)} + * If negative, {@link TransactionDBOptions#getTransactionLockTimeout()} * will be used *

* Default: -1 diff --git a/java/src/main/java/org/rocksdb/UInt64AddOperator.java b/java/src/main/java/org/rocksdb/UInt64AddOperator.java index 536ba58d8352..524d2a18651b 100644 --- a/java/src/main/java/org/rocksdb/UInt64AddOperator.java +++ b/java/src/main/java/org/rocksdb/UInt64AddOperator.java @@ -10,6 +10,9 @@ * integer value. */ public class UInt64AddOperator extends MergeOperator { + /** + * Constructs a UInt64AddOperator. + */ public UInt64AddOperator() { super(newSharedUInt64AddOperator()); } diff --git a/java/src/main/java/org/rocksdb/VectorMemTableConfig.java b/java/src/main/java/org/rocksdb/VectorMemTableConfig.java index d87efb1b7fbf..bc09c50ce08e 100644 --- a/java/src/main/java/org/rocksdb/VectorMemTableConfig.java +++ b/java/src/main/java/org/rocksdb/VectorMemTableConfig.java @@ -5,6 +5,10 @@ * The config for vector memtable representation. */ public class VectorMemTableConfig extends MemTableConfig { + + /** + * The default reserved size for the Vector Mem Table. + */ public static final int DEFAULT_RESERVED_SIZE = 0; /** diff --git a/java/src/main/java/org/rocksdb/WBWIRocksIterator.java b/java/src/main/java/org/rocksdb/WBWIRocksIterator.java index 5f7b7b8a1d70..441f290a5fb8 100644 --- a/java/src/main/java/org/rocksdb/WBWIRocksIterator.java +++ b/java/src/main/java/org/rocksdb/WBWIRocksIterator.java @@ -7,10 +7,19 @@ import java.nio.ByteBuffer; +/** + * Iterator over the contents of a Write Batch With Index. + */ public class WBWIRocksIterator extends AbstractRocksIterator { private final WriteEntry entry = new WriteEntry(); + /** + * Constructs a WBWIRocksIterator. + * + * @param wbwi the write batch with index. + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ WBWIRocksIterator. 
+ */ protected WBWIRocksIterator(final WriteBatchWithIndex wbwi, final long nativeHandle) { super(wbwi, nativeHandle); @@ -127,12 +136,40 @@ private static native void seekForPrevByteArray0Jni( * that created the record in the Write Batch */ public enum WriteType { + + /** + * Put. + */ PUT((byte)0x0), + + /** + * Merge. + */ MERGE((byte)0x1), + + /** + * Delete. + */ DELETE((byte)0x2), + + /** + * Single Delete. + */ SINGLE_DELETE((byte)0x3), + + /** + * Delete Range. + */ DELETE_RANGE((byte)0x4), + + /** + * Log. + */ LOG((byte)0x5), + + /** + * Transaction ID. + */ XID((byte)0x6); final byte id; @@ -140,13 +177,22 @@ public enum WriteType { this.id = id; } - public static WriteType fromId(final byte id) { + /** + * Get a WriteType from its byte representation. + * + * @param value the byte representation of the WriteType. + * + * @return the WriteType + * + * @throws IllegalArgumentException if the {@code value} parameter does not represent a WriteType. + */ + public static WriteType fromId(final byte value) { for(final WriteType wt : WriteType.values()) { - if(id == wt.id) { + if(value == wt.id) { return wt; } } - throw new IllegalArgumentException("No WriteType with id=" + id); + throw new IllegalArgumentException("No WriteType with id=" + value); } } @@ -182,6 +228,13 @@ private WriteEntry() { value = new DirectSlice(); } + /** + * Constructs a WriteEntry. + * + * @param type the type of the write. + * @param key the key. + * @param value the value. + */ public WriteEntry(final WriteType type, final DirectSlice key, final DirectSlice value) { this.type = type; diff --git a/java/src/main/java/org/rocksdb/WalFileType.java b/java/src/main/java/org/rocksdb/WalFileType.java index fed27ed11705..371f2e7b2ff6 100644 --- a/java/src/main/java/org/rocksdb/WalFileType.java +++ b/java/src/main/java/org/rocksdb/WalFileType.java @@ -5,6 +5,9 @@ package org.rocksdb; +/** + * Types of WAL file. 
+ */ public enum WalFileType { /** * Indicates that WAL file is in archive directory. WAL files are moved from diff --git a/java/src/main/java/org/rocksdb/WalFilter.java b/java/src/main/java/org/rocksdb/WalFilter.java index a2836634af65..eac7b657f18d 100644 --- a/java/src/main/java/org/rocksdb/WalFilter.java +++ b/java/src/main/java/org/rocksdb/WalFilter.java @@ -56,7 +56,14 @@ LogRecordFoundResult logRecordFound(final long logNumber, final String logFileName, final WriteBatch batch, final WriteBatch newBatch); + /** + * LogFoundResult. + */ class LogRecordFoundResult { + + /** + * Constant for continuing processing unchanged. + */ public static LogRecordFoundResult CONTINUE_UNCHANGED = new LogRecordFoundResult(WalProcessingOption.CONTINUE_PROCESSING, false); diff --git a/java/src/main/java/org/rocksdb/WalProcessingOption.java b/java/src/main/java/org/rocksdb/WalProcessingOption.java index 3a9c2be0e3b5..a37c83634587 100644 --- a/java/src/main/java/org/rocksdb/WalProcessingOption.java +++ b/java/src/main/java/org/rocksdb/WalProcessingOption.java @@ -5,8 +5,11 @@ package org.rocksdb; +/** + * Options for WAL processing. + */ public enum WalProcessingOption { - /* + /** * Continue processing as usual. */ CONTINUE_PROCESSING((byte)0x0), @@ -42,6 +45,15 @@ byte getValue() { return value; } + /** + * Get an option from its byte representation. + * + * @param value the byte representation of the option. + * + * @return the option + * + * @throws IllegalArgumentException if the {@code value} parameter does not represent an option. 
+ */ public static WalProcessingOption fromValue(final byte value) { for (final WalProcessingOption walProcessingOption : WalProcessingOption.values()) { if (walProcessingOption.value == value) { diff --git a/java/src/main/java/org/rocksdb/WriteBatch.java b/java/src/main/java/org/rocksdb/WriteBatch.java index 1802d929c226..4fdeecd6c2f3 100644 --- a/java/src/main/java/org/rocksdb/WriteBatch.java +++ b/java/src/main/java/org/rocksdb/WriteBatch.java @@ -392,6 +392,9 @@ private static native void iterate(final long handle, final long handlerHandle) * Handler callback for iterating over the contents of a batch. */ public abstract static class Handler extends RocksCallbackObject { + /** + * Constructs a Handler. + */ public Handler() { super(0L); } @@ -401,39 +404,182 @@ protected long initializeNative(final long... nativeParameterHandles) { return createNewHandler0(); } + /** + * Put operation callback. + * + * @param columnFamilyId the id of the column family that the operation was performed on. + * @param key the key from the put operation. + * @param value the value from the put operation. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void put(final int columnFamilyId, final byte[] key, final byte[] value) throws RocksDBException; + + /** + * Put operation callback. + * + * @param key the key from the put operation. + * @param value the value from the put operation. + */ public abstract void put(final byte[] key, final byte[] value); + + /** + * Merge operation callback. + * + * @param columnFamilyId the id of the column family that the operation was performed on. + * @param key the key from the merge operation. + * @param value the value from the merge operation. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void merge(final int columnFamilyId, final byte[] key, final byte[] value) throws RocksDBException; + + /** + * Merge operation callback. 
+ * + * @param key the key from the merge operation. + * @param value the value from the merge operation. + */ public abstract void merge(final byte[] key, final byte[] value); + + /** + * Delete operation callback. + * + * @param columnFamilyId the id of the column family that the operation was performed on. + * @param key the key from the delete operation. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void delete(final int columnFamilyId, final byte[] key) throws RocksDBException; + + /** + * Delete operation callback. + * + * @param key the key from the delete operation. + */ public abstract void delete(final byte[] key); + + /** + * Single Delete operation callback. + * + * @param columnFamilyId the id of the column family that the operation was performed on. + * @param key the key from the single delete operation. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void singleDelete(final int columnFamilyId, final byte[] key) throws RocksDBException; + + /** + * Single Delete operation callback. + * + * @param key the key from the single delete operation. + */ public abstract void singleDelete(final byte[] key); + + /** + * Delete Range operation callback. + * + * @param columnFamilyId the id of the column family that the operation was performed on. + * @param beginKey the begin key from the delete range operation. + * @param endKey the end key from the delete range operation. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void deleteRange(final int columnFamilyId, final byte[] beginKey, final byte[] endKey) throws RocksDBException; + + /** + * Delete Range operation callback. + * + * @param beginKey the begin key from the delete range operation. + * @param endKey the end key from the delete range operation. + */ public abstract void deleteRange(final byte[] beginKey, final byte[] endKey); + + /** + * Log Data operation callback. 
+ * + * @param blob the blob from the log data operation. + */ public abstract void logData(final byte[] blob); + + /** + * Put Blob Index operation callback. + * + * @param columnFamilyId the id of the column family that the operation was performed on. + * @param key the key from the put blob index operation. + * @param value the value from the put blob index operation. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void putBlobIndex(final int columnFamilyId, final byte[] key, final byte[] value) throws RocksDBException; + + /** + * Mark Begin Prepare operation callback. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void markBeginPrepare() throws RocksDBException; + + /** + * Mark End Prepare operation callback. + * + * @param xid the transaction id. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void markEndPrepare(final byte[] xid) throws RocksDBException; + + /** + * Mark Noop operation callback. + * + * @param emptyBatch true if the batch was empty, false otherwise. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void markNoop(final boolean emptyBatch) throws RocksDBException; + + /** + * Mark Rollback operation callback. + * + * @param xid the transaction id. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void markRollback(final byte[] xid) throws RocksDBException; + + /** + * Mark Commit operation callback. + * + * @param xid the transaction id. + * + * @throws RocksDBException to signal an error from the handler. + */ public abstract void markCommit(final byte[] xid) throws RocksDBException; + + /** + * Mark Commit With Timestamp operation callback. + * + * @param xid the transaction id. + * @param ts the timestamp. + * + * @throws RocksDBException to signal an error from the handler. 
+ */ public abstract void markCommitWithTimestamp(final byte[] xid, final byte[] ts) throws RocksDBException; /** - * shouldContinue is called by the underlying iterator + * Called by the underlying iterator * {@link WriteBatch#iterate(Handler)}. If it returns false, * iteration is halted. Otherwise, it continues * iterating. The default implementation always @@ -457,6 +603,13 @@ public static class SavePoint { private long count; private long contentFlags; + /** + * Constructs a SavePoint. + * + * @param size the size + * @param count the count + * @param contentFlags the content flags + */ public SavePoint(final long size, final long count, final long contentFlags) { this.size = size; @@ -464,6 +617,9 @@ public SavePoint(final long size, final long count, this.contentFlags = contentFlags; } + /** + * Clear the save point data. + */ public void clear() { this.size = 0; this.count = 0; @@ -497,6 +653,12 @@ public long getContentFlags() { return contentFlags; } + /** + * Determines if {@link #clear()} was + * called. + * + * @return true if {@link #clear()} was called and the save point remains empty, false otherwise. + */ public boolean isCleared() { return (size | count | contentFlags) == 0; } diff --git a/java/src/main/java/org/rocksdb/WriteBufferManager.java b/java/src/main/java/org/rocksdb/WriteBufferManager.java index 495fbdb961b9..fd2cf560819f 100644 --- a/java/src/main/java/org/rocksdb/WriteBufferManager.java +++ b/java/src/main/java/org/rocksdb/WriteBufferManager.java @@ -28,10 +28,21 @@ public WriteBufferManager( this.allowStall_ = allowStall; } - public WriteBufferManager(final long bufferSizeBytes, final Cache cache){ + /** + * Construct a new instance of WriteBufferManager. + * + * @param bufferSizeBytes the buffer size in bytes. + * @param cache the cache to use. + */ + public WriteBufferManager(final long bufferSizeBytes, final Cache cache) { this(bufferSizeBytes, cache, false); } + /** + * Determine if the Write Buffer Manager is allowed to stall. 
+ * + * @return true if it is allowed to stall, false otherwise. + */ public boolean allowStall() { return allowStall_; } diff --git a/java/src/main/java/org/rocksdb/WriteStallCondition.java b/java/src/main/java/org/rocksdb/WriteStallCondition.java index 98d9e2ce4adf..c91310374b1d 100644 --- a/java/src/main/java/org/rocksdb/WriteStallCondition.java +++ b/java/src/main/java/org/rocksdb/WriteStallCondition.java @@ -5,9 +5,24 @@ package org.rocksdb; +/** + * Conditions that caused Write Stalls. + */ public enum WriteStallCondition { + + /** + * Delayed. + */ DELAYED((byte) 0x0), + + /** + * Stopped. + */ STOPPED((byte) 0x1), + + /** + * Normal. + */ NORMAL((byte) 0x2); private final byte value; diff --git a/java/src/main/java/org/rocksdb/WriteStallInfo.java b/java/src/main/java/org/rocksdb/WriteStallInfo.java index 1cade0acb8ed..9cccc0a95971 100644 --- a/java/src/main/java/org/rocksdb/WriteStallInfo.java +++ b/java/src/main/java/org/rocksdb/WriteStallInfo.java @@ -7,6 +7,9 @@ import java.util.Objects; +/** + * Information on a Write Stall. + */ public class WriteStallInfo { private final String columnFamilyName; private final WriteStallCondition currentCondition; diff --git a/java/src/main/java/org/rocksdb/util/BufferUtil.java b/java/src/main/java/org/rocksdb/util/BufferUtil.java index 54be3e6937d6..4c23cfba86e5 100644 --- a/java/src/main/java/org/rocksdb/util/BufferUtil.java +++ b/java/src/main/java/org/rocksdb/util/BufferUtil.java @@ -6,7 +6,20 @@ package org.rocksdb.util; +/** + * Utility functions for working with buffers. + */ public class BufferUtil { + + /** + * Check the bounds for an operation on a buffer. 
+ * + * @param offset the offset + * @param len the length + * @param size the size + * + * @throws IndexOutOfBoundsException if the values are out of bounds + */ public static void CheckBounds(final int offset, final int len, final int size) { if ((offset | len | (offset + len) | (size - (offset + len))) < 0) { throw new IndexOutOfBoundsException( diff --git a/java/src/main/java/org/rocksdb/util/ByteUtil.java b/java/src/main/java/org/rocksdb/util/ByteUtil.java index 5d64d5dcf29a..8c32668ca8b9 100644 --- a/java/src/main/java/org/rocksdb/util/ByteUtil.java +++ b/java/src/main/java/org/rocksdb/util/ByteUtil.java @@ -10,6 +10,9 @@ import static java.nio.charset.StandardCharsets.UTF_8; +/** + * Simple utility functions for working with bytes. + */ public class ByteUtil { /** @@ -29,7 +32,7 @@ public static byte[] bytes(final String str) { * lexically less than {@code y}, or a value greater than zero if {@code x} * is lexically greater than {@code y}. Note that lexical order is determined * as if comparing unsigned char arrays. - * + *

* Similar to memcmp.c. * * @param x the first value to compare with diff --git a/java/src/main/java/org/rocksdb/util/BytewiseComparator.java b/java/src/main/java/org/rocksdb/util/BytewiseComparator.java index fd55fdf8c57c..fb32db3be0da 100644 --- a/java/src/main/java/org/rocksdb/util/BytewiseComparator.java +++ b/java/src/main/java/org/rocksdb/util/BytewiseComparator.java @@ -23,6 +23,11 @@ */ public final class BytewiseComparator extends AbstractComparator { + /** + * Constructs a new BytewiseComparator. + * + * @param copt the configuration options for the comparator. + */ public BytewiseComparator(final ComparatorOptions copt) { super(copt); } diff --git a/java/src/main/java/org/rocksdb/util/Environment.java b/java/src/main/java/org/rocksdb/util/Environment.java index 78b73dc5d432..f6b2fa505657 100644 --- a/java/src/main/java/org/rocksdb/util/Environment.java +++ b/java/src/main/java/org/rocksdb/util/Environment.java @@ -5,6 +5,9 @@ import java.io.IOException; import java.util.Locale; +/** + * Provides information about the environment in which RocksJava is executing. + */ public class Environment { @SuppressWarnings("FieldMayBeFinal") private static String OS = System.getProperty("os.name").toLowerCase(Locale.getDefault()); @@ -24,38 +27,83 @@ public class Environment { */ private static Boolean MUSL_LIBC = null; + /** + * Returns true if the CPU architecture is aarch64. + * + * @return true if the CPU architecture is aarch64, false otherwise. + */ public static boolean isAarch64() { return ARCH.contains("aarch64"); } + /** + * Returns true if the CPU architecture is ppc. + * + * @return true if the CPU architecture is ppc, false otherwise. + */ public static boolean isPowerPC() { return ARCH.contains("ppc"); } + /** + * Returns true if the CPU architecture is s390x. + * + * @return true if the CPU architecture is s390x, false otherwise. 
+ */ public static boolean isS390x() { return ARCH.contains("s390x"); } + /** + * Returns true if the CPU architecture is riscv64. + * + * @return true if the CPU architecture is riscv64, false otherwise. + */ public static boolean isRiscv64() { return ARCH.contains("riscv64"); } + /** + * Returns true if the OS is Windows. + * + * @return true if the OS is Windows, false otherwise. + */ public static boolean isWindows() { return (OS.contains("win")); } + /** + * Returns true if the OS is FreeBSD. + * + * @return true if the OS is FreeBSD, false otherwise. + */ public static boolean isFreeBSD() { return (OS.contains("freebsd")); } + /** + * Returns true if the OS is Mac. + * + * @return true if the OS is Mac, false otherwise. + */ public static boolean isMac() { return (OS.contains("mac")); } + /** + * Returns true if the OS is AIX. + * + * @return true if the OS is AIX, false otherwise. + */ public static boolean isAix() { return OS.contains("aix"); } - + + /** + * Returns true if the OS is Unix. + * + * @return true if the OS is Unix, false otherwise. + */ public static boolean isUnix() { return OS.contains("nix") || OS.contains("nux"); @@ -75,9 +123,9 @@ public static boolean isMuslLibc() { /** * Determine if the environment has a musl libc. - * + *

* The initialisation counterpart of {@link #isMuslLibc()}. - * + *

* Intentionally package-private for testing. * * @return true if the environment has a musl libc, false otherwise. @@ -136,14 +184,29 @@ static boolean initIsMuslLibc() { return false; } + /** + * Returns true if the OS is Solaris. + * + * @return true if the OS is Solaris, false otherwise. + */ public static boolean isSolaris() { return OS.contains("sunos"); } + /** + * Returns true if the OS is OpenBSD. + * + * @return true if the OS is OpenBSD, false otherwise. + */ public static boolean isOpenBSD() { return (OS.contains("openbsd")); } + /** + * Returns true if the system architecture is 64 bit. + * + * @return true if the system architecture is 64 bit, false otherwise. + */ public static boolean is64Bit() { if (ARCH.contains(SPARCV9)) { return true; @@ -151,10 +214,24 @@ public static boolean is64Bit() { return (ARCH.indexOf("64") > 0); } + /** + * Get the name as that of a shared JNI library. + * + * @param name the name. + * + * @return the name of the shared JNI library. + */ public static String getSharedLibraryName(final String name) { return name + "jni"; } + /** + * Get the filename as that of a shared JNI library. + * + * @param name the name. + * + * @return the filename of the shared JNI library. + */ public static String getSharedLibraryFileName(final String name) { return appendLibOsSuffix("lib" + getSharedLibraryName(name), true); } @@ -181,6 +258,16 @@ private static String getLibcPostfix() { return "-" + libcName; } + + /** + * Get the name as that of a JNI library. + *

+ * Deals with platform and architecture specific naming. + * + * @param name the name. + * + * @return the name of the JNI library. + */ public static String getJniLibraryName(final String name) { if (isUnix()) { final String arch = is64Bit() ? "64" : "32"; @@ -219,6 +306,15 @@ public static String getJniLibraryName(final String name) { throw new UnsupportedOperationException(String.format("Cannot determine JNI library name for ARCH='%s' OS='%s' name='%s'", ARCH, OS, name)); } + /** + * Get a fallback name as that of a JNI library. + *

+ * Deals with platform and architecture specific naming. + * + * @param name the name. + * + * @return the fallback name of the JNI library. + */ public static /*@Nullable*/ String getFallbackJniLibraryName(final String name) { if (isMac() && is64Bit()) { return String.format("%sjni-osx", name); @@ -226,10 +322,28 @@ public static String getJniLibraryName(final String name) { return null; } + /** + * Get the filename as that of a JNI library. + *

+ * Deals with platform and architecture specific naming. + * + * @param name the name. + * + * @return the filename of the JNI library. + */ public static String getJniLibraryFileName(final String name) { return appendLibOsSuffix("lib" + getJniLibraryName(name), false); } + /** + * Get the fallback filename as that of a JNI library. + *

+ * Deals with platform and architecture specific naming. + * + * @param name the name. + * + * @return the fallback filename of the JNI library. + */ public static /*@Nullable*/ String getFallbackJniLibraryFileName(final String name) { final String fallbackJniLibraryName = getFallbackJniLibraryName(name); if (fallbackJniLibraryName == null) { @@ -249,6 +363,13 @@ private static String appendLibOsSuffix(final String libraryFileName, final bool throw new UnsupportedOperationException(); } + /** + * Get the filename extension used for a JNI library. + *

+ * Deals with platform and architecture specific naming. + * + * @return the filename extension. + */ public static String getJniLibraryExtension() { if (isWindows()) { return ".dll"; diff --git a/java/src/main/java/org/rocksdb/util/IntComparator.java b/java/src/main/java/org/rocksdb/util/IntComparator.java index 2caf0c601572..cf3c6423f08c 100644 --- a/java/src/main/java/org/rocksdb/util/IntComparator.java +++ b/java/src/main/java/org/rocksdb/util/IntComparator.java @@ -13,11 +13,11 @@ /** * This is a Java implementation of a Comparator for Java int * keys. - * + *

* This comparator assumes keys are (at least) four bytes, so * the caller must guarantee that in accessing other APIs in * combination with this comparator. - * + *

* The performance of Comparators implemented in Java is always * less than their C++ counterparts due to the bridging overhead, * as such you likely don't want to use this apart from benchmarking @@ -25,8 +25,13 @@ */ public final class IntComparator extends AbstractComparator { - public IntComparator(final ComparatorOptions copt) { - super(copt); + /** + * Constructs an IntComparator. + * + * @param comparatorOptions the options for the comparator. + */ + public IntComparator(final ComparatorOptions comparatorOptions) { + super(comparatorOptions); } @Override diff --git a/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java b/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java index 3d3c429416b0..e145184eac6c 100644 --- a/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java +++ b/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java @@ -24,6 +24,11 @@ */ public final class ReverseBytewiseComparator extends AbstractComparator { + /** + * Constructs a ReverseBytewiseComparator. + * + * @param copt the comparator options. + */ public ReverseBytewiseComparator(final ComparatorOptions copt) { super(copt); } diff --git a/java/src/main/java/org/rocksdb/util/SizeUnit.java b/java/src/main/java/org/rocksdb/util/SizeUnit.java index 0f717e8d4540..8582bb15436b 100644 --- a/java/src/main/java/org/rocksdb/util/SizeUnit.java +++ b/java/src/main/java/org/rocksdb/util/SizeUnit.java @@ -5,12 +5,33 @@ package org.rocksdb.util; -public class SizeUnit { - public static final long KB = 1024L; - public static final long MB = KB * KB; - public static final long GB = KB * MB; - public static final long TB = KB * GB; - public static final long PB = KB * TB; +/** + * Simple factors of byte sizes. + */ +public interface SizeUnit { - private SizeUnit() {} + /** + * 1 Kilobyte. + */ + long KB = 1024L; + + /** + * 1 Megabyte. + */ + long MB = KB * KB; + + /** + * 1 Gigabyte. + */ + long GB = KB * MB; + + /** + * 1 Terabyte. 
+ */ + long TB = KB * GB; + + /** + * 1 Petabyte. + */ + long PB = KB * TB; } diff --git a/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java b/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java index 549b74beb1cc..9c6689ea8906 100644 --- a/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java +++ b/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java @@ -125,14 +125,14 @@ public void fullHistoryTSLowDefault() { @Test public void canceled() { CompactRangeOptions opt = new CompactRangeOptions(); - assertThat(opt.canceled()).isEqualTo(false); - opt.setCanceled(true); - assertThat(opt.canceled()).isEqualTo(true); - opt.setCanceled(false); - assertThat(opt.canceled()).isEqualTo(false); - opt.setCanceled(true); - assertThat(opt.canceled()).isEqualTo(true); - opt.setCanceled(true); - assertThat(opt.canceled()).isEqualTo(true); + assertThat(opt.cancelled()).isEqualTo(false); + opt.setCancelled(true); + assertThat(opt.cancelled()).isEqualTo(true); + opt.setCancelled(false); + assertThat(opt.cancelled()).isEqualTo(false); + opt.setCancelled(true); + assertThat(opt.cancelled()).isEqualTo(true); + opt.setCancelled(true); + assertThat(opt.cancelled()).isEqualTo(true); } } diff --git a/java/src/test/java/org/rocksdb/SstFileManagerTest.java b/java/src/test/java/org/rocksdb/SstFileManagerTest.java index 2e136e820035..96c5627096d8 100644 --- a/java/src/test/java/org/rocksdb/SstFileManagerTest.java +++ b/java/src/test/java/org/rocksdb/SstFileManagerTest.java @@ -47,7 +47,7 @@ public void trackedFiles() throws RocksDBException { @Test public void deleteRateBytesPerSecond() throws RocksDBException { try (final SstFileManager sstFileManager = new SstFileManager(Env.getDefault())) { - assertThat(sstFileManager.getDeleteRateBytesPerSecond()).isEqualTo(SstFileManager.RATE_BYTES_PER_SEC_DEFAULT); + assertThat(sstFileManager.getDeleteRateBytesPerSecond()).isEqualTo(SstFileManager.DEFAULT_RATE_BYTES_PER_SEC); final long ratePerSecond = 1024 * 1024 * 52; 
sstFileManager.setDeleteRateBytesPerSecond(ratePerSecond); assertThat(sstFileManager.getDeleteRateBytesPerSecond()).isEqualTo(ratePerSecond); @@ -57,7 +57,7 @@ public void deleteRateBytesPerSecond() throws RocksDBException { @Test public void maxTrashDBRatio() throws RocksDBException { try (final SstFileManager sstFileManager = new SstFileManager(Env.getDefault())) { - assertThat(sstFileManager.getMaxTrashDBRatio()).isEqualTo(SstFileManager.MAX_TRASH_DB_RATION_DEFAULT); + assertThat(sstFileManager.getMaxTrashDBRatio()).isEqualTo(SstFileManager.DEFAULT_MAX_TRASH_DB_RATIO); final double trashRatio = 0.2; sstFileManager.setMaxTrashDBRatio(trashRatio); assertThat(sstFileManager.getMaxTrashDBRatio()).isEqualTo(trashRatio); diff --git a/java/src/test/java/org/rocksdb/util/ByteBufferAllocator.java b/java/src/test/java/org/rocksdb/util/ByteBufferAllocator.java index 8d7956cf27f6..c3309d787878 100644 --- a/java/src/test/java/org/rocksdb/util/ByteBufferAllocator.java +++ b/java/src/test/java/org/rocksdb/util/ByteBufferAllocator.java @@ -8,7 +8,18 @@ import java.nio.ByteBuffer; +/** + * Allocates for creating new buffers. + */ public interface ByteBufferAllocator { + + /** + * Allocate a new ByteBuffer. + * + * @param capacity the capacity of the buffer. + * + * @return the new ByteBuffer. + */ ByteBuffer allocate(int capacity); ByteBufferAllocator DIRECT = new DirectByteBufferAllocator(); diff --git a/java/src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java b/java/src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java index 8ea104332cb1..8b06fbfabca0 100644 --- a/java/src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java +++ b/java/src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java @@ -124,16 +124,34 @@ public void markCommitWithTimestamp(final byte[] xid, final byte[] ts) throws Ro events.add(new Event(Action.MARK_COMMIT_WITH_TIMESTAMP, (byte[]) null, (byte[]) null)); } + /** + * Event received by the handler. 
+ */ public static class Event { public final Action action; public final int columnFamilyId; public final byte[] key; public final byte[] value; + /** + * Construct an event. + * + * @param action the action of the event + * @param key the key of the event + * @param value the value of the event + */ public Event(final Action action, final byte[] key, final byte[] value) { this(action, 0, key, value); } + /** + * Construct an event. + * + * @param action the action of the event + * @param columnFamilyId the id of the column family of the event + * @param key the key of the event + * @param value the value of the event + */ public Event(final Action action, final int columnFamilyId, final byte[] key, final byte[] value) { this.action = action; From 4c83728119d99436258eb64a19b2707bba40723c Mon Sep 17 00:00:00 2001 From: Adam Retter Date: Thu, 21 Dec 2023 20:23:29 +0100 Subject: [PATCH 07/15] Add CI Job to enforce correct JavaDocs --- .github/workflows/pr-jobs.yml | 30 +++++++++++++++++++++++++++++- java/Makefile | 2 +- 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pr-jobs.yml b/.github/workflows/pr-jobs.yml index a3cfcdbce73e..20e2bc0c4916 100644 --- a/.github/workflows/pr-jobs.yml +++ b/.github/workflows/pr-jobs.yml @@ -444,7 +444,35 @@ jobs: - name: Build RocksDBJava Static Library # NOTE: replaced scl enable devtoolset-7 'make V=1 J=8 -j8 rocksdbjavastatic' run: make V=1 J=8 -j8 rocksdbjavastatic - # post-steps skipped because of compatibility issues with docker image + check-javadocs: + if: ${{ github.repository_owner == 'facebook' }} + runs-on: + labels: 4-core-ubuntu + container: + image: evolvedbinary/rocksjava:centos7_x64-be + options: --shm-size=16gb + steps: + # The docker image is based on such an old OS that it has a GLIBC + # incompatibility with actions/checkout and other actions. Thus we + # implement a manual checkout step. + - name: Checkout + env: + GH_TOKEN: ${{ github.token }} + run: | + chown `whoami` . 
|| true + git clone --no-checkout https://oauth2:$GH_TOKEN@github.com/${{ github.repository }}.git . + git -c protocol.version=2 fetch --update-head-ok --no-tags --prune --no-recurse-submodules --depth=1 origin +${{ github.sha }}:${{ github.ref }} + git checkout --progress --force ${{ github.ref }} + git log -1 --format='%H' + - uses: "./.github/actions/pre-steps" + - name: Set Java Environment + run: |- + echo "JAVA_HOME=${JAVA_HOME}" + which java && java -version + which javac && javac -version + - name: Check RocksDBJava JavaDocs + run: scl enable devtoolset-7 'pushd java; make V=1 J=8 -j8 javadocs' + # NOTE: post-steps skipped because of compatibility issues with docker image build-macos-java: if: ${{ github.repository_owner == 'facebook' }} runs-on: macos-15-xlarge diff --git a/java/Makefile b/java/Makefile index 5e00921c62b9..70c0d7f1b007 100644 --- a/java/Makefile +++ b/java/Makefile @@ -343,7 +343,7 @@ clean-downloaded: javadocs: java $(AM_V_GEN)mkdir -p $(JAVADOC) - $(AM_V_at)$(JAVADOC_CMD) -d $(JAVADOC) -sourcepath $(MAIN_SRC) -subpackages org + $(AM_V_at)$(JAVADOC_CMD) -Xwerror -d $(JAVADOC) -sourcepath $(MAIN_SRC) -subpackages org javalib: java java_test javadocs From 896f8e1c51ffe7c17a69343cc6d6981c48673e7a Mon Sep 17 00:00:00 2001 From: Adam Retter Date: Thu, 21 Dec 2023 21:56:44 +0100 Subject: [PATCH 08/15] Ran 'make format' --- .../org/rocksdb/AbstractCompactionFilter.java | 4 +-- .../AbstractCompactionFilterFactory.java | 1 - .../org/rocksdb/AbstractEventListener.java | 2 -- .../org/rocksdb/AbstractRocksIterator.java | 3 +- .../main/java/org/rocksdb/AbstractSlice.java | 4 +-- .../java/org/rocksdb/AbstractTableFilter.java | 1 - .../rocksdb/AbstractTransactionNotifier.java | 1 - .../java/org/rocksdb/AbstractWriteBatch.java | 4 +-- .../main/java/org/rocksdb/BackupEngine.java | 4 +-- .../org/rocksdb/BlockBasedTableConfig.java | 1 - java/src/main/java/org/rocksdb/Cache.java | 5 ++- .../rocksdb/CassandraCompactionFilter.java | 1 - 
.../rocksdb/CassandraValueMergeOperator.java | 1 - .../src/main/java/org/rocksdb/ClockCache.java | 4 +-- .../java/org/rocksdb/CompactionJobInfo.java | 1 - .../java/org/rocksdb/CompactionJobStats.java | 1 - .../java/org/rocksdb/CompactionOptions.java | 1 - .../org/rocksdb/CompactionOptionsFIFO.java | 1 - .../rocksdb/CompactionOptionsUniversal.java | 1 - .../java/org/rocksdb/CompactionReason.java | 1 - .../java/org/rocksdb/CompressionOptions.java | 3 +- .../org/rocksdb/ConcurrentTaskLimiter.java | 3 +- .../rocksdb/ConcurrentTaskLimiterImpl.java | 1 - .../main/java/org/rocksdb/DirectSlice.java | 1 - java/src/main/java/org/rocksdb/Filter.java | 4 +-- java/src/main/java/org/rocksdb/GetStatus.java | 1 - .../rocksdb/HashLinkedListMemTableConfig.java | 1 - .../rocksdb/HashSkipListMemTableConfig.java | 1 - .../rocksdb/ImportColumnFamilyOptions.java | 1 - .../rocksdb/IngestExternalFileOptions.java | 1 - .../main/java/org/rocksdb/KeyMayExist.java | 3 +- .../main/java/org/rocksdb/MergeOperator.java | 18 +++++------ .../java/org/rocksdb/MutableOptionKey.java | 1 - .../org/rocksdb/OptimisticTransactionDB.java | 4 +-- .../rocksdb/OptimisticTransactionOptions.java | 1 - .../main/java/org/rocksdb/PerfContext.java | 10 +++--- .../java/org/rocksdb/PersistentCache.java | 1 - .../java/org/rocksdb/PlainTableConfig.java | 1 - .../org/rocksdb/PrepopulateBlobCache.java | 1 - java/src/main/java/org/rocksdb/Priority.java | 1 - .../main/java/org/rocksdb/RateLimiter.java | 1 - .../java/org/rocksdb/RateLimiterMode.java | 1 - .../main/java/org/rocksdb/ReadOptions.java | 1 - java/src/main/java/org/rocksdb/ReadTier.java | 1 - .../RemoveEmptyValueCompactionFilter.java | 1 - .../java/org/rocksdb/RocksCallbackObject.java | 4 +-- java/src/main/java/org/rocksdb/RocksDB.java | 1 - .../main/java/org/rocksdb/RocksIterator.java | 4 +-- .../java/org/rocksdb/RocksMutableObject.java | 6 ++-- .../main/java/org/rocksdb/RocksObject.java | 6 ++-- .../main/java/org/rocksdb/SanityLevel.java | 1 - 
.../org/rocksdb/SizeApproximationFlag.java | 1 - .../org/rocksdb/SkipListMemTableConfig.java | 1 - .../main/java/org/rocksdb/SstFileManager.java | 4 +-- .../main/java/org/rocksdb/SstFileReader.java | 1 - .../org/rocksdb/SstFileReaderIterator.java | 4 +-- .../org/rocksdb/SstPartitionerFactory.java | 3 +- .../SstPartitionerFixedPrefixFactory.java | 1 - java/src/main/java/org/rocksdb/StateType.java | 1 - .../src/main/java/org/rocksdb/Statistics.java | 1 - java/src/main/java/org/rocksdb/Status.java | 10 +++--- .../org/rocksdb/TableFileCreationReason.java | 1 - .../main/java/org/rocksdb/Transaction.java | 6 ++-- .../org/rocksdb/TransactionDBOptions.java | 32 +++++++------------ .../java/org/rocksdb/TransactionOptions.java | 1 - .../java/org/rocksdb/UInt64AddOperator.java | 12 +++---- .../org/rocksdb/VectorMemTableConfig.java | 1 - .../java/org/rocksdb/WBWIRocksIterator.java | 9 +++--- java/src/main/java/org/rocksdb/WalFilter.java | 1 - .../src/main/java/org/rocksdb/WriteBatch.java | 3 +- .../java/org/rocksdb/WriteBufferManager.java | 12 +++---- .../java/org/rocksdb/WriteStallCondition.java | 1 - .../java/org/rocksdb/util/BufferUtil.java | 1 - .../main/java/org/rocksdb/util/ByteUtil.java | 3 +- .../org/rocksdb/util/BytewiseComparator.java | 1 - .../java/org/rocksdb/util/Environment.java | 1 - .../java/org/rocksdb/util/IntComparator.java | 1 - .../util/ReverseBytewiseComparator.java | 1 - .../main/java/org/rocksdb/util/SizeUnit.java | 1 - .../java/org/rocksdb/SstFileManagerTest.java | 6 ++-- .../org/rocksdb/util/ByteBufferAllocator.java | 1 - 81 files changed, 100 insertions(+), 149 deletions(-) diff --git a/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java b/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java index 1f4a5e4a687c..45ad20e2cad0 100644 --- a/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java +++ b/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java @@ -15,7 +15,6 @@ */ public abstract class AbstractCompactionFilter> 
extends RocksObject { - /** * Context of the Compaction Filter. */ @@ -58,7 +57,8 @@ public boolean isManualCompaction() { * Constructor to be called by subclasses to set the * handle to the underlying C++ object. * - * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ Compaction Filter. + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ Compaction Filter. */ protected AbstractCompactionFilter(final long nativeHandle) { super(nativeHandle); diff --git a/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java b/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java index 0cf7814101c1..2c6fc0f68a48 100644 --- a/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java +++ b/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java @@ -13,7 +13,6 @@ */ public abstract class AbstractCompactionFilterFactory> extends RocksCallbackObject { - /** * Constructs a new Compaction Filter Factory which has no underlying C++ object. */ diff --git a/java/src/main/java/org/rocksdb/AbstractEventListener.java b/java/src/main/java/org/rocksdb/AbstractEventListener.java index 5c7f58ab6afb..5f29024bf063 100644 --- a/java/src/main/java/org/rocksdb/AbstractEventListener.java +++ b/java/src/main/java/org/rocksdb/AbstractEventListener.java @@ -12,12 +12,10 @@ */ @SuppressWarnings("PMD.AvoidDuplicateLiterals") public abstract class AbstractEventListener extends RocksCallbackObject implements EventListener { - /** * Callback events that can be enabled. */ public enum EnabledEventCallback { - /** * Flush completed. 
*/ diff --git a/java/src/main/java/org/rocksdb/AbstractRocksIterator.java b/java/src/main/java/org/rocksdb/AbstractRocksIterator.java index 734b202c8a9c..4c744dc82e33 100644 --- a/java/src/main/java/org/rocksdb/AbstractRocksIterator.java +++ b/java/src/main/java/org/rocksdb/AbstractRocksIterator.java @@ -29,7 +29,8 @@ public abstract class AbstractRocksIterator

* Constructs an AbstractRocksIterator. * * @param parent the parent object from which the Rocks Iterator was created. - * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ RocksIterator. + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ RocksIterator. */ protected AbstractRocksIterator(final P parent, final long nativeHandle) { diff --git a/java/src/main/java/org/rocksdb/AbstractSlice.java b/java/src/main/java/org/rocksdb/AbstractSlice.java index ad037652ac3f..0d00a056a2da 100644 --- a/java/src/main/java/org/rocksdb/AbstractSlice.java +++ b/java/src/main/java/org/rocksdb/AbstractSlice.java @@ -27,7 +27,6 @@ * @param the concrete Java type that is wrapped by the subclass of {@link AbstractSlice}. */ public abstract class AbstractSlice extends RocksMutableObject { - /** * Constructs an AbstractSlice. */ @@ -38,7 +37,8 @@ protected AbstractSlice() { /** * Constructs an AbstractSlice. * - * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ Slice. + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ Slice. */ protected AbstractSlice(final long nativeHandle) { super(nativeHandle); diff --git a/java/src/main/java/org/rocksdb/AbstractTableFilter.java b/java/src/main/java/org/rocksdb/AbstractTableFilter.java index 0c7f994fbee8..b24ce8692dd4 100644 --- a/java/src/main/java/org/rocksdb/AbstractTableFilter.java +++ b/java/src/main/java/org/rocksdb/AbstractTableFilter.java @@ -6,7 +6,6 @@ */ public abstract class AbstractTableFilter extends RocksCallbackObject implements TableFilter { - /** * Constructs a new AbstractTableFilter. 
*/ diff --git a/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java b/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java index 5b6ed1b5b508..35e69301e44c 100644 --- a/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java +++ b/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java @@ -11,7 +11,6 @@ */ public abstract class AbstractTransactionNotifier extends RocksCallbackObject { - /** * Constructs an AbstractTransactionNotifier. */ diff --git a/java/src/main/java/org/rocksdb/AbstractWriteBatch.java b/java/src/main/java/org/rocksdb/AbstractWriteBatch.java index 59e253e75b0f..33ca8d23aa29 100644 --- a/java/src/main/java/org/rocksdb/AbstractWriteBatch.java +++ b/java/src/main/java/org/rocksdb/AbstractWriteBatch.java @@ -22,11 +22,11 @@ */ public abstract class AbstractWriteBatch extends RocksObject implements WriteBatchInterface { - /** * Construct an AbstractWriteBatch. * - * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ Write Batch object. + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ Write Batch object. */ protected AbstractWriteBatch(final long nativeHandle) { super(nativeHandle); diff --git a/java/src/main/java/org/rocksdb/BackupEngine.java b/java/src/main/java/org/rocksdb/BackupEngine.java index 0b3e567af632..b07afbf8fc36 100644 --- a/java/src/main/java/org/rocksdb/BackupEngine.java +++ b/java/src/main/java/org/rocksdb/BackupEngine.java @@ -18,11 +18,11 @@ * time you need to do a backup. */ public class BackupEngine extends RocksObject implements AutoCloseable { - /** * Construct a BackupEngine. * - * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ backup engine object. + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ backup engine object. 
*/ protected BackupEngine(final long nativeHandle) { super(nativeHandle); diff --git a/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java b/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java index 072ccc8fc6c3..cdc86392961e 100644 --- a/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java +++ b/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java @@ -11,7 +11,6 @@ */ // TODO(AR) should be renamed BlockBasedTableOptions public class BlockBasedTableConfig extends TableFormatConfig { - /** * Constructs a new BlockBasedTableConfig. */ diff --git a/java/src/main/java/org/rocksdb/Cache.java b/java/src/main/java/org/rocksdb/Cache.java index fc814c94beed..c1e4812031d2 100644 --- a/java/src/main/java/org/rocksdb/Cache.java +++ b/java/src/main/java/org/rocksdb/Cache.java @@ -5,16 +5,15 @@ package org.rocksdb; - /** * Base class for Cache implementations. */ public abstract class Cache extends RocksObject { - /** * Construct a Cache. * - * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ cache object. + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ cache object. */ protected Cache(final long nativeHandle) { super(nativeHandle); diff --git a/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java b/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java index b452a54c3b73..c2705ed4e13e 100644 --- a/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java +++ b/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java @@ -14,7 +14,6 @@ */ public class CassandraCompactionFilter extends AbstractCompactionFilter { - /** * Constructs a new CasandraCompactionFilter. 
* diff --git a/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java b/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java index c6e87b9e836d..597a18efd9ae 100644 --- a/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java +++ b/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java @@ -12,7 +12,6 @@ * values. */ public class CassandraValueMergeOperator extends MergeOperator { - /** * Constructs a new CassandraValueMergeOperator. * diff --git a/java/src/main/java/org/rocksdb/ClockCache.java b/java/src/main/java/org/rocksdb/ClockCache.java index 171f5596a90f..452ef33f7b2c 100644 --- a/java/src/main/java/org/rocksdb/ClockCache.java +++ b/java/src/main/java/org/rocksdb/ClockCache.java @@ -62,8 +62,8 @@ public ClockCache(final long capacity, final int numShardBits) { * @param strictCapacityLimit insert to the cache will fail when cache is full */ @Deprecated - public ClockCache(final long capacity, final int numShardBits, - final boolean strictCapacityLimit) { + public ClockCache( + final long capacity, final int numShardBits, final boolean strictCapacityLimit) { super(newClockCache(capacity, numShardBits, strictCapacityLimit)); } diff --git a/java/src/main/java/org/rocksdb/CompactionJobInfo.java b/java/src/main/java/org/rocksdb/CompactionJobInfo.java index 309ca53067fa..96f50120ba5a 100644 --- a/java/src/main/java/org/rocksdb/CompactionJobInfo.java +++ b/java/src/main/java/org/rocksdb/CompactionJobInfo.java @@ -13,7 +13,6 @@ * Information about a Compaction Job. */ public class CompactionJobInfo extends RocksObject { - /** * Constructs a new CompactionJobInfo. */ diff --git a/java/src/main/java/org/rocksdb/CompactionJobStats.java b/java/src/main/java/org/rocksdb/CompactionJobStats.java index fb5c950ee859..f02799d823ec 100644 --- a/java/src/main/java/org/rocksdb/CompactionJobStats.java +++ b/java/src/main/java/org/rocksdb/CompactionJobStats.java @@ -9,7 +9,6 @@ * Statistics about a Compaction Job. 
*/ public class CompactionJobStats extends RocksObject { - /** * Constructs a new CompactionJobStats. */ diff --git a/java/src/main/java/org/rocksdb/CompactionOptions.java b/java/src/main/java/org/rocksdb/CompactionOptions.java index 5cb791806013..47ebfc8d0f28 100644 --- a/java/src/main/java/org/rocksdb/CompactionOptions.java +++ b/java/src/main/java/org/rocksdb/CompactionOptions.java @@ -13,7 +13,6 @@ * calls. */ public class CompactionOptions extends RocksObject { - /** * Constructs a new CompactionOptions. */ diff --git a/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java b/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java index 4d359dd814a2..7ea28695815d 100644 --- a/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java +++ b/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java @@ -9,7 +9,6 @@ * Options for FIFO Compaction */ public class CompactionOptionsFIFO extends RocksObject { - /** * Constructs a new CompactionOptionsFIFO. */ diff --git a/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java b/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java index 191234c4c649..54013b071cfd 100644 --- a/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java +++ b/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java @@ -9,7 +9,6 @@ * Options for Universal Compaction */ public class CompactionOptionsUniversal extends RocksObject { - /** * Constructs a new CompactionOptionsUniversal. */ diff --git a/java/src/main/java/org/rocksdb/CompactionReason.java b/java/src/main/java/org/rocksdb/CompactionReason.java index a6050c716a18..68828c3a9ef5 100644 --- a/java/src/main/java/org/rocksdb/CompactionReason.java +++ b/java/src/main/java/org/rocksdb/CompactionReason.java @@ -9,7 +9,6 @@ * Reasons for compaction. */ public enum CompactionReason { - /** * Unknown. 
*/ diff --git a/java/src/main/java/org/rocksdb/CompressionOptions.java b/java/src/main/java/org/rocksdb/CompressionOptions.java index 53f1480cda08..138634c47a1d 100644 --- a/java/src/main/java/org/rocksdb/CompressionOptions.java +++ b/java/src/main/java/org/rocksdb/CompressionOptions.java @@ -9,7 +9,6 @@ * Options for Compression */ public class CompressionOptions extends RocksObject { - /** * RocksDB's generic default compression level. Internally it'll be translated * to the default compression level specific to the library being used. @@ -71,7 +70,7 @@ public CompressionOptions setLevel(final int level) { * Get the Compression "level". *

* See {@link #setLevel(int)} - * + * * @return the compression level. */ public int level() { diff --git a/java/src/main/java/org/rocksdb/ConcurrentTaskLimiter.java b/java/src/main/java/org/rocksdb/ConcurrentTaskLimiter.java index 33bbcd5ba41e..b07d691380f4 100644 --- a/java/src/main/java/org/rocksdb/ConcurrentTaskLimiter.java +++ b/java/src/main/java/org/rocksdb/ConcurrentTaskLimiter.java @@ -13,7 +13,8 @@ public abstract class ConcurrentTaskLimiter extends RocksObject { /** * Constructs a ConcurrentTaskLimiter. * - * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ concurrent task limiter object. + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ concurrent task limiter object. */ protected ConcurrentTaskLimiter(final long nativeHandle) { super(nativeHandle); diff --git a/java/src/main/java/org/rocksdb/ConcurrentTaskLimiterImpl.java b/java/src/main/java/org/rocksdb/ConcurrentTaskLimiterImpl.java index 145eef11bba6..acfc7829de97 100644 --- a/java/src/main/java/org/rocksdb/ConcurrentTaskLimiterImpl.java +++ b/java/src/main/java/org/rocksdb/ConcurrentTaskLimiterImpl.java @@ -10,7 +10,6 @@ * Concurrent Task Limiter. */ public class ConcurrentTaskLimiterImpl extends ConcurrentTaskLimiter { - /** * Construct a new Concurrent Task Limiter. * diff --git a/java/src/main/java/org/rocksdb/DirectSlice.java b/java/src/main/java/org/rocksdb/DirectSlice.java index 83f7a2acc5e0..be7b2cd9e546 100644 --- a/java/src/main/java/org/rocksdb/DirectSlice.java +++ b/java/src/main/java/org/rocksdb/DirectSlice.java @@ -16,7 +16,6 @@ * values consider using @see org.rocksdb.Slice */ public class DirectSlice extends AbstractSlice { - /** * Constant for No Direct Slice. 
*/ diff --git a/java/src/main/java/org/rocksdb/Filter.java b/java/src/main/java/org/rocksdb/Filter.java index 94f3c4d6a82f..9f42b84fa562 100644 --- a/java/src/main/java/org/rocksdb/Filter.java +++ b/java/src/main/java/org/rocksdb/Filter.java @@ -14,11 +14,11 @@ */ //TODO(AR) should be renamed FilterPolicy public abstract class Filter extends RocksObject { - /** * Constructs a filter. * - * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ filter object. + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ filter object. */ protected Filter(final long nativeHandle) { super(nativeHandle); diff --git a/java/src/main/java/org/rocksdb/GetStatus.java b/java/src/main/java/org/rocksdb/GetStatus.java index a7ab4902f3f3..0c9fbd27f8c8 100644 --- a/java/src/main/java/org/rocksdb/GetStatus.java +++ b/java/src/main/java/org/rocksdb/GetStatus.java @@ -12,7 +12,6 @@ * If the target of the fetch is not big enough, this may be bigger than the contents of the target. */ public class GetStatus { - /** * The status of the request to fetch into the buffer. */ diff --git a/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java b/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java index 4edfd3a0d7fa..509441ddf81c 100644 --- a/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java +++ b/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java @@ -15,7 +15,6 @@ * and post a warning in the LOG. */ public class HashLinkedListMemTableConfig extends MemTableConfig { - /** * The default number of buckets. 
*/ diff --git a/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java b/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java index 187866213c4c..8197a8879b2d 100644 --- a/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java +++ b/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java @@ -15,7 +15,6 @@ * and post a warning in the LOG. */ public class HashSkipListMemTableConfig extends MemTableConfig { - /** * The default number of buckets. */ diff --git a/java/src/main/java/org/rocksdb/ImportColumnFamilyOptions.java b/java/src/main/java/org/rocksdb/ImportColumnFamilyOptions.java index 0c5dea7de6ba..100dd6eb897a 100644 --- a/java/src/main/java/org/rocksdb/ImportColumnFamilyOptions.java +++ b/java/src/main/java/org/rocksdb/ImportColumnFamilyOptions.java @@ -12,7 +12,6 @@ * ExportImportFilesMetaData)}. */ public class ImportColumnFamilyOptions extends RocksObject { - /** * Constructs an ImportColumnFamilyOptions. */ diff --git a/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java b/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java index 040313c59fc2..c9419f46ac41 100644 --- a/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java +++ b/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java @@ -11,7 +11,6 @@ * {@link RocksDB#ingestExternalFile(ColumnFamilyHandle, List, IngestExternalFileOptions)}. */ public class IngestExternalFileOptions extends RocksObject { - /** * Constructs an IngestExternalFileOptions. */ diff --git a/java/src/main/java/org/rocksdb/KeyMayExist.java b/java/src/main/java/org/rocksdb/KeyMayExist.java index 98d176f6d4fd..a87af6a05a7d 100644 --- a/java/src/main/java/org/rocksdb/KeyMayExist.java +++ b/java/src/main/java/org/rocksdb/KeyMayExist.java @@ -28,7 +28,8 @@ public int hashCode() { } /** - * Part of the return type from {@link RocksDB#keyMayExist(ColumnFamilyHandle, ByteBuffer, ByteBuffer)}. 
+ * Part of the return type from {@link RocksDB#keyMayExist(ColumnFamilyHandle, ByteBuffer, + * ByteBuffer)}. */ public enum KeyMayExistEnum { /** diff --git a/java/src/main/java/org/rocksdb/MergeOperator.java b/java/src/main/java/org/rocksdb/MergeOperator.java index 630c400cfa9a..9bf93c8b5a62 100644 --- a/java/src/main/java/org/rocksdb/MergeOperator.java +++ b/java/src/main/java/org/rocksdb/MergeOperator.java @@ -12,13 +12,13 @@ * value. */ public abstract class MergeOperator extends RocksObject { - - /** - * Constructs a MergeOperator. - * - * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ MergeOperator. - */ - protected MergeOperator(final long nativeHandle) { - super(nativeHandle); - } + /** + * Constructs a MergeOperator. + * + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ MergeOperator. + */ + protected MergeOperator(final long nativeHandle) { + super(nativeHandle); + } } diff --git a/java/src/main/java/org/rocksdb/MutableOptionKey.java b/java/src/main/java/org/rocksdb/MutableOptionKey.java index 2a0b46823bd9..a9b03a0ac1b5 100644 --- a/java/src/main/java/org/rocksdb/MutableOptionKey.java +++ b/java/src/main/java/org/rocksdb/MutableOptionKey.java @@ -5,7 +5,6 @@ * Mutable Option keys. 
*/ public interface MutableOptionKey { - /** * Types of values used for Mutable Options, */ diff --git a/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java b/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java index 42104bfcbe2b..d088cd1a43be 100644 --- a/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java +++ b/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java @@ -229,8 +229,8 @@ protected final void disposeInternal(final long handle) { } private static native void disposeInternalJni(final long handle); - private static native long open(final long optionsHandle, - final String path) throws RocksDBException; + private static native long open(final long optionsHandle, final String path) + throws RocksDBException; private static native long[] open(final long handle, final String path, final byte[][] columnFamilyNames, final long[] columnFamilyOptions); private static native void closeDatabase(final long handle) throws RocksDBException; diff --git a/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java b/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java index 5f1f1842cac5..22bf90901d43 100644 --- a/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java +++ b/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java @@ -10,7 +10,6 @@ */ public class OptimisticTransactionOptions extends RocksObject implements TransactionalOptions { - /** * Constructs an OptimisticTransactionOptions. */ diff --git a/java/src/main/java/org/rocksdb/PerfContext.java b/java/src/main/java/org/rocksdb/PerfContext.java index a9f6402bb76b..9e48ec79dbc1 100644 --- a/java/src/main/java/org/rocksdb/PerfContext.java +++ b/java/src/main/java/org/rocksdb/PerfContext.java @@ -9,11 +9,11 @@ * Performance Context. */ public class PerfContext extends RocksObject { - /** * Constructs a PerfContext. * - * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ PerfContext. 
+ * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ PerfContext. */ protected PerfContext(final long nativeHandle) { super(nativeHandle); @@ -608,7 +608,8 @@ public long getEnvReuseWritableFileNanos() { } /** - * Get the time taken in nanoseconds for creating new random access read-write file(s) in the environment. + * Get the time taken in nanoseconds for creating new random access read-write file(s) in the + * environment. * * @return the total time */ @@ -671,7 +672,8 @@ public long getEnvCreateDirNanos() { } /** - * Get the time taken in nanoseconds for creating directories(s) (only if not already existing) in the environment. + * Get the time taken in nanoseconds for creating directories(s) (only if not already existing) in + * the environment. * * @return the total time */ diff --git a/java/src/main/java/org/rocksdb/PersistentCache.java b/java/src/main/java/org/rocksdb/PersistentCache.java index e9e34343a6ab..3ba3b9609dec 100644 --- a/java/src/main/java/org/rocksdb/PersistentCache.java +++ b/java/src/main/java/org/rocksdb/PersistentCache.java @@ -10,7 +10,6 @@ * cache is specifically designed for persistent read cache. */ public class PersistentCache extends RocksObject { - /** * Constructs a persistent cache. * diff --git a/java/src/main/java/org/rocksdb/PlainTableConfig.java b/java/src/main/java/org/rocksdb/PlainTableConfig.java index 01bd76c73736..f577b7c6f14b 100644 --- a/java/src/main/java/org/rocksdb/PlainTableConfig.java +++ b/java/src/main/java/org/rocksdb/PlainTableConfig.java @@ -13,7 +13,6 @@ *

It also support prefix hash feature.

*/ public class PlainTableConfig extends TableFormatConfig { - /** * Indicates that the key sizew can be variable length. */ diff --git a/java/src/main/java/org/rocksdb/PrepopulateBlobCache.java b/java/src/main/java/org/rocksdb/PrepopulateBlobCache.java index d2a02f6a9271..e7317363f9cc 100644 --- a/java/src/main/java/org/rocksdb/PrepopulateBlobCache.java +++ b/java/src/main/java/org/rocksdb/PrepopulateBlobCache.java @@ -18,7 +18,6 @@ * system since it involves network traffic and higher latencies.

*/ public enum PrepopulateBlobCache { - /** * Disable pre-populating the blob cache */ diff --git a/java/src/main/java/org/rocksdb/Priority.java b/java/src/main/java/org/rocksdb/Priority.java index ac656f9a350f..44026ed67a0b 100644 --- a/java/src/main/java/org/rocksdb/Priority.java +++ b/java/src/main/java/org/rocksdb/Priority.java @@ -9,7 +9,6 @@ * The Thread Pool priority. */ public enum Priority { - /** * Bottom most priority. */ diff --git a/java/src/main/java/org/rocksdb/RateLimiter.java b/java/src/main/java/org/rocksdb/RateLimiter.java index caf73e749020..8cfe028d53f4 100644 --- a/java/src/main/java/org/rocksdb/RateLimiter.java +++ b/java/src/main/java/org/rocksdb/RateLimiter.java @@ -12,7 +12,6 @@ * @since 3.10.0 */ public class RateLimiter extends RocksObject { - /** * The default refill period in microseconds. */ diff --git a/java/src/main/java/org/rocksdb/RateLimiterMode.java b/java/src/main/java/org/rocksdb/RateLimiterMode.java index d0bdc3882b1d..68ea265d98ec 100644 --- a/java/src/main/java/org/rocksdb/RateLimiterMode.java +++ b/java/src/main/java/org/rocksdb/RateLimiterMode.java @@ -9,7 +9,6 @@ * Mode for {@link RateLimiter#RateLimiter(long, long, int, RateLimiterMode)}. */ public enum RateLimiterMode { - /** * Only rate limit reads. */ diff --git a/java/src/main/java/org/rocksdb/ReadOptions.java b/java/src/main/java/org/rocksdb/ReadOptions.java index 26503cb22544..f141ef55e747 100644 --- a/java/src/main/java/org/rocksdb/ReadOptions.java +++ b/java/src/main/java/org/rocksdb/ReadOptions.java @@ -12,7 +12,6 @@ * become out-of-scope to release the allocated memory in c++. */ public class ReadOptions extends RocksObject { - /** * Constructs a ReadOptions. 
*/ diff --git a/java/src/main/java/org/rocksdb/ReadTier.java b/java/src/main/java/org/rocksdb/ReadTier.java index b200823544ca..43dd893c2ed2 100644 --- a/java/src/main/java/org/rocksdb/ReadTier.java +++ b/java/src/main/java/org/rocksdb/ReadTier.java @@ -9,7 +9,6 @@ * RocksDB {@link ReadOptions} read tiers. */ public enum ReadTier { - /** * Read all tiers. */ diff --git a/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java b/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java index 935828d0e1dd..52d0d90d4f76 100644 --- a/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java +++ b/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java @@ -10,7 +10,6 @@ */ public class RemoveEmptyValueCompactionFilter extends AbstractCompactionFilter { - /** * Constructs a RemoveEmptyValueCompactionFilter. */ diff --git a/java/src/main/java/org/rocksdb/RocksCallbackObject.java b/java/src/main/java/org/rocksdb/RocksCallbackObject.java index 345eeb2b7fc2..34cdd5d95b6c 100644 --- a/java/src/main/java/org/rocksdb/RocksCallbackObject.java +++ b/java/src/main/java/org/rocksdb/RocksCallbackObject.java @@ -22,7 +22,6 @@ */ public abstract class RocksCallbackObject extends AbstractImmutableNativeReference { - /** * An immutable reference to the value of the C++ pointer pointing to some * underlying native RocksDB C++ object that @@ -33,7 +32,8 @@ public abstract class RocksCallbackObject extends /** * Constructs a RocksCallbackObject. * - * @param nativeParameterHandles reference to the value of the C++ pointers pointing to the underlying native RocksDB C++ objects. + * @param nativeParameterHandles reference to the value of the C++ pointers pointing to the + * underlying native RocksDB C++ objects. */ protected RocksCallbackObject(final long... 
nativeParameterHandles) { super(true); diff --git a/java/src/main/java/org/rocksdb/RocksDB.java b/java/src/main/java/org/rocksdb/RocksDB.java index da17cf02241a..fe074c9ba60c 100644 --- a/java/src/main/java/org/rocksdb/RocksDB.java +++ b/java/src/main/java/org/rocksdb/RocksDB.java @@ -21,7 +21,6 @@ * indicates sth wrong at the RocksDB library side and the call failed. */ public class RocksDB extends RocksObject { - /** * The name of the default column family. */ diff --git a/java/src/main/java/org/rocksdb/RocksIterator.java b/java/src/main/java/org/rocksdb/RocksIterator.java index 87713d70590b..fbb042aca13d 100644 --- a/java/src/main/java/org/rocksdb/RocksIterator.java +++ b/java/src/main/java/org/rocksdb/RocksIterator.java @@ -23,12 +23,12 @@ * @see org.rocksdb.RocksObject */ public class RocksIterator extends AbstractRocksIterator { - /** * Constructs a RocksIterator. * * @param rocksDb the database. - * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ RocksIterator. + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ RocksIterator. */ protected RocksIterator(final RocksDB rocksDb, final long nativeHandle) { super(rocksDb, nativeHandle); diff --git a/java/src/main/java/org/rocksdb/RocksMutableObject.java b/java/src/main/java/org/rocksdb/RocksMutableObject.java index 6312634a4d52..3a69f699657d 100644 --- a/java/src/main/java/org/rocksdb/RocksMutableObject.java +++ b/java/src/main/java/org/rocksdb/RocksMutableObject.java @@ -31,7 +31,8 @@ protected RocksMutableObject() { /** * Constructs a RocksMutableObject. * - * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ object. + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ object. 
*/ protected RocksMutableObject(final long nativeHandle) { this.nativeHandle_ = nativeHandle; @@ -99,7 +100,8 @@ protected void disposeInternal() { * All subclasses of {@code RocksObject} must * implement this to release their underlying native C++ objects. * - * @param handle reference to the value of the C++ pointer pointing to some underlying native RocksDB C++ object. + * @param handle reference to the value of the C++ pointer pointing to some underlying native + * RocksDB C++ object. */ protected abstract void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/RocksObject.java b/java/src/main/java/org/rocksdb/RocksObject.java index a7657224cfd4..9af9d41ae7aa 100644 --- a/java/src/main/java/org/rocksdb/RocksObject.java +++ b/java/src/main/java/org/rocksdb/RocksObject.java @@ -28,7 +28,8 @@ public abstract class RocksObject extends AbstractImmutableNativeReference { * Constructor to be called by subclasses to set the * handle to the underlying C++ object. * - * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ object. + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ object. */ protected RocksObject(final long nativeHandle) { super(true); @@ -48,7 +49,8 @@ protected void disposeInternal() { * All subclasses of {@code RocksObject} must * implement this to release their underlying native C++ objects. * - * @param handle reference to the value of the C++ pointer pointing to some underlying native RocksDB C++ object. + * @param handle reference to the value of the C++ pointer pointing to some underlying native + * RocksDB C++ object. 
*/ protected abstract void disposeInternal(final long handle); } diff --git a/java/src/main/java/org/rocksdb/SanityLevel.java b/java/src/main/java/org/rocksdb/SanityLevel.java index e24671c287b6..f8546913a97f 100644 --- a/java/src/main/java/org/rocksdb/SanityLevel.java +++ b/java/src/main/java/org/rocksdb/SanityLevel.java @@ -10,7 +10,6 @@ * The Sanity Level. */ public enum SanityLevel { - /** * None. */ diff --git a/java/src/main/java/org/rocksdb/SizeApproximationFlag.java b/java/src/main/java/org/rocksdb/SizeApproximationFlag.java index 3e2759a10bd5..e4ed68562d53 100644 --- a/java/src/main/java/org/rocksdb/SizeApproximationFlag.java +++ b/java/src/main/java/org/rocksdb/SizeApproximationFlag.java @@ -10,7 +10,6 @@ * or file stats approximation or both. */ public enum SizeApproximationFlag { - /** * None */ diff --git a/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java b/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java index 558a2a6f50b8..b783f3ecd021 100644 --- a/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java +++ b/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java @@ -5,7 +5,6 @@ * The config for skip-list memtable representation. */ public class SkipListMemTableConfig extends MemTableConfig { - /** * The default lookahead. */ diff --git a/java/src/main/java/org/rocksdb/SstFileManager.java b/java/src/main/java/org/rocksdb/SstFileManager.java index e201e2603044..9932abd43552 100644 --- a/java/src/main/java/org/rocksdb/SstFileManager.java +++ b/java/src/main/java/org/rocksdb/SstFileManager.java @@ -17,7 +17,6 @@ */ //@ThreadSafe public final class SstFileManager extends RocksObject { - /** * The default bytes-per-sec rate. 
*/ @@ -105,8 +104,7 @@ public SstFileManager(final Env env, /*@Nullable*/ final Logger logger, public SstFileManager(final Env env, /*@Nullable*/ final Logger logger, final long rateBytesPerSec, final double maxTrashDbRatio) throws RocksDBException { - this(env, logger, rateBytesPerSec, maxTrashDbRatio, - DEFAULT_BYTES_MAX_DELETE_CHUNK); + this(env, logger, rateBytesPerSec, maxTrashDbRatio, DEFAULT_BYTES_MAX_DELETE_CHUNK); } /** diff --git a/java/src/main/java/org/rocksdb/SstFileReader.java b/java/src/main/java/org/rocksdb/SstFileReader.java index e7e81bcc2da0..a4b03c7d050a 100644 --- a/java/src/main/java/org/rocksdb/SstFileReader.java +++ b/java/src/main/java/org/rocksdb/SstFileReader.java @@ -9,7 +9,6 @@ * An SST File Reader. */ public class SstFileReader extends RocksObject { - /** * Constructs an SstFileReader. * diff --git a/java/src/main/java/org/rocksdb/SstFileReaderIterator.java b/java/src/main/java/org/rocksdb/SstFileReaderIterator.java index 71b2d46fb9e4..821a6bff0e0a 100644 --- a/java/src/main/java/org/rocksdb/SstFileReaderIterator.java +++ b/java/src/main/java/org/rocksdb/SstFileReaderIterator.java @@ -21,12 +21,12 @@ * @see RocksObject */ public class SstFileReaderIterator extends AbstractRocksIterator { - /** * Constructs a SstFileReaderIterator. * * @param reader the SST file reader. - * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ SstFileReaderIterator. + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ SstFileReaderIterator. 
*/ protected SstFileReaderIterator(final SstFileReader reader, final long nativeHandle) { super(reader, nativeHandle); diff --git a/java/src/main/java/org/rocksdb/SstPartitionerFactory.java b/java/src/main/java/org/rocksdb/SstPartitionerFactory.java index a87cfd75ffa4..4e9c72bfdd94 100644 --- a/java/src/main/java/org/rocksdb/SstPartitionerFactory.java +++ b/java/src/main/java/org/rocksdb/SstPartitionerFactory.java @@ -12,7 +12,8 @@ public abstract class SstPartitionerFactory extends RocksObject { /** * Constructs a SstPartitionerFactory. * - * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ SstPartitionerFactory. + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ SstPartitionerFactory. */ protected SstPartitionerFactory(final long nativeHandle) { super(nativeHandle); diff --git a/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java b/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java index 128243db0161..d14cbe69e20d 100644 --- a/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java +++ b/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java @@ -9,7 +9,6 @@ * Fixed prefix factory. It partitions SST files using fixed prefix of the key. */ public class SstPartitionerFixedPrefixFactory extends SstPartitionerFactory { - /** * Constructs an SstPartitionerFixedPrefixFactory. * diff --git a/java/src/main/java/org/rocksdb/StateType.java b/java/src/main/java/org/rocksdb/StateType.java index 8151b098cf3e..78829dbf557e 100644 --- a/java/src/main/java/org/rocksdb/StateType.java +++ b/java/src/main/java/org/rocksdb/StateType.java @@ -12,7 +12,6 @@ * such as reading / writing a file or waiting for a mutex. */ public enum StateType { - /** * Unknown. 
*/ diff --git a/java/src/main/java/org/rocksdb/Statistics.java b/java/src/main/java/org/rocksdb/Statistics.java index f9c1f20d645c..bb1f197fd0bf 100644 --- a/java/src/main/java/org/rocksdb/Statistics.java +++ b/java/src/main/java/org/rocksdb/Statistics.java @@ -12,7 +12,6 @@ * is managed by Options class. */ public class Statistics extends RocksObject { - /** * Constructs a Statistics. */ diff --git a/java/src/main/java/org/rocksdb/Status.java b/java/src/main/java/org/rocksdb/Status.java index fa8e86bc603c..11c5ceff0274 100644 --- a/java/src/main/java/org/rocksdb/Status.java +++ b/java/src/main/java/org/rocksdb/Status.java @@ -100,7 +100,8 @@ public String getCodeString() { /** * Status Code. *

- * Should stay in sync with /include/rocksdb/status.h:Code and /java/rocksjni/portal.h:toJavaStatusCode + * Should stay in sync with /include/rocksdb/status.h:Code and + * /java/rocksjni/portal.h:toJavaStatusCode */ public enum Code { /** @@ -216,10 +217,10 @@ public byte getValue() { /** * Status Sub-code. *

- * should stay in sync with /include/rocksdb/status.h:SubCode and /java/rocksjni/portal.h:toJavaStatusSubCode + * should stay in sync with /include/rocksdb/status.h:SubCode and + * /java/rocksjni/portal.h:toJavaStatusSubCode */ public enum SubCode { - /** * None. */ @@ -278,7 +279,8 @@ public enum SubCode { * * @return the sub-code * - * @throws IllegalArgumentException if the {@code value} parameter does not represent a sub-code. + * @throws IllegalArgumentException if the {@code value} parameter does not represent a + * sub-code. */ public static SubCode getSubCode(final byte value) { for (final SubCode subCode : SubCode.values()) { diff --git a/java/src/main/java/org/rocksdb/TableFileCreationReason.java b/java/src/main/java/org/rocksdb/TableFileCreationReason.java index f45da28e5776..eaa06245a0c0 100644 --- a/java/src/main/java/org/rocksdb/TableFileCreationReason.java +++ b/java/src/main/java/org/rocksdb/TableFileCreationReason.java @@ -9,7 +9,6 @@ * Reasons for Table File creation. */ public enum TableFileCreationReason { - /** * Flush. */ diff --git a/java/src/main/java/org/rocksdb/Transaction.java b/java/src/main/java/org/rocksdb/Transaction.java index 5db1384c7740..7b1fc532a2df 100644 --- a/java/src/main/java/org/rocksdb/Transaction.java +++ b/java/src/main/java/org/rocksdb/Transaction.java @@ -2860,12 +2860,12 @@ public enum TransactionState { /** * Old misspelled variable as alias for {@link #COMMITTED}. - * Tip from https://stackoverflow.com/a/37092410/454544 + * Tip from https://stackoverflow.com/a/37092410/454544 * * @deprecated use {@link #COMMITTED} instead. 
*/ - @Deprecated - public static final TransactionState COMMITED = COMMITTED; + @Deprecated public static final TransactionState COMMITED = COMMITTED; private final byte value; diff --git a/java/src/main/java/org/rocksdb/TransactionDBOptions.java b/java/src/main/java/org/rocksdb/TransactionDBOptions.java index abc135ca4b78..15e8c2e52bcd 100644 --- a/java/src/main/java/org/rocksdb/TransactionDBOptions.java +++ b/java/src/main/java/org/rocksdb/TransactionDBOptions.java @@ -9,7 +9,6 @@ * Options for TransactionDB. */ public class TransactionDBOptions extends RocksObject { - /** * Constructs a TransactionDB. */ @@ -117,12 +116,10 @@ public TransactionDBOptions setTransactionLockTimeout( /** * The wait timeout in milliseconds when writing a key * OUTSIDE of a transaction (ie by calling {@link RocksDB#put(byte[], byte[])}, - * {@link RocksDB#merge(byte[], byte[])}, {@link RocksDB#delete(WriteOptions, byte[])} or {@link RocksDB#write(WriteOptions, WriteBatch)} - * directly). - *

- * If 0, no waiting is done if a lock cannot instantly be acquired. - * If negative, there is no timeout and will block indefinitely when acquiring - * a lock. + * {@link RocksDB#merge(byte[], byte[])}, {@link RocksDB#delete(WriteOptions, byte[])} or {@link + * RocksDB#write(WriteOptions, WriteBatch)} directly).

If 0, no waiting is done if a lock + * cannot instantly be acquired. If negative, there is no timeout and will block indefinitely when + * acquiring a lock. * * @return the timeout in milliseconds when writing a key outside of the transaction */ @@ -134,20 +131,13 @@ public long getDefaultLockTimeout() { /** * If positive, specifies the wait timeout in milliseconds when writing a key * OUTSIDE of a transaction (ie by calling {@link RocksDB#put(byte[], byte[])}, - * {@link RocksDB#merge(byte[], byte[])}, {@link RocksDB#delete(byte[])} or {@link RocksDB#write(WriteOptions, WriteBatch)} - * directly). - *

- * If 0, no waiting is done if a lock cannot instantly be acquired. - * If negative, there is no timeout and will block indefinitely when acquiring - * a lock. - *

- * Not using a timeout can lead to deadlocks. Currently, there - * is no deadlock-detection to recover from a deadlock. While DB writes - * cannot deadlock with other DB writes, they can deadlock with a transaction. - * A negative timeout should only be used if all transactions have a small - * expiration set. - *

- * Default: 1000 + * {@link RocksDB#merge(byte[], byte[])}, {@link RocksDB#delete(byte[])} or {@link + * RocksDB#write(WriteOptions, WriteBatch)} directly).

If 0, no waiting is done if a lock + * cannot instantly be acquired. If negative, there is no timeout and will block indefinitely when + * acquiring a lock.

Not using a timeout can lead to deadlocks. Currently, there is no + * deadlock-detection to recover from a deadlock. While DB writes cannot deadlock with other DB + * writes, they can deadlock with a transaction. A negative timeout should only be used if all + * transactions have a small expiration set.

Default: 1000 * * @param defaultLockTimeout the timeout in milliseconds when writing a key * outside of the transaction diff --git a/java/src/main/java/org/rocksdb/TransactionOptions.java b/java/src/main/java/org/rocksdb/TransactionOptions.java index 43a482b240b4..924e931e78f8 100644 --- a/java/src/main/java/org/rocksdb/TransactionOptions.java +++ b/java/src/main/java/org/rocksdb/TransactionOptions.java @@ -10,7 +10,6 @@ */ public class TransactionOptions extends RocksObject implements TransactionalOptions { - /** * Constructs a TransactionOptions. */ diff --git a/java/src/main/java/org/rocksdb/UInt64AddOperator.java b/java/src/main/java/org/rocksdb/UInt64AddOperator.java index 524d2a18651b..f43903706930 100644 --- a/java/src/main/java/org/rocksdb/UInt64AddOperator.java +++ b/java/src/main/java/org/rocksdb/UInt64AddOperator.java @@ -10,12 +10,12 @@ * integer value. */ public class UInt64AddOperator extends MergeOperator { - /** - * Constructs a UInt64AddOperator. - */ - public UInt64AddOperator() { - super(newSharedUInt64AddOperator()); - } + /** + * Constructs a UInt64AddOperator. + */ + public UInt64AddOperator() { + super(newSharedUInt64AddOperator()); + } private static native long newSharedUInt64AddOperator(); @Override diff --git a/java/src/main/java/org/rocksdb/VectorMemTableConfig.java b/java/src/main/java/org/rocksdb/VectorMemTableConfig.java index bc09c50ce08e..428727c80e8c 100644 --- a/java/src/main/java/org/rocksdb/VectorMemTableConfig.java +++ b/java/src/main/java/org/rocksdb/VectorMemTableConfig.java @@ -5,7 +5,6 @@ * The config for vector memtable representation. */ public class VectorMemTableConfig extends MemTableConfig { - /** * The default reserved size for the Vector Mem Table. 
*/ diff --git a/java/src/main/java/org/rocksdb/WBWIRocksIterator.java b/java/src/main/java/org/rocksdb/WBWIRocksIterator.java index 441f290a5fb8..bdfdae31ebf4 100644 --- a/java/src/main/java/org/rocksdb/WBWIRocksIterator.java +++ b/java/src/main/java/org/rocksdb/WBWIRocksIterator.java @@ -18,7 +18,8 @@ public class WBWIRocksIterator * Constructs a WBWIRocksIterator. * * @param wbwi the write batch with index. - * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native RocksDB C++ WBWIRocksIterator. + * @param nativeHandle reference to the value of the C++ pointer pointing to the underlying native + * RocksDB C++ WBWIRocksIterator. */ protected WBWIRocksIterator(final WriteBatchWithIndex wbwi, final long nativeHandle) { @@ -136,7 +137,6 @@ private static native void seekForPrevByteArray0Jni( * that created the record in the Write Batch */ public enum WriteType { - /** * Put. */ @@ -184,11 +184,12 @@ public enum WriteType { * * @return the WriteType * - * @throws IllegalArgumentException if the {@code value} parameter does not represent a WriteType. + * @throws IllegalArgumentException if the {@code value} parameter does not represent a + * WriteType. */ public static WriteType fromId(final byte value) { for(final WriteType wt : WriteType.values()) { - if(value == wt.id) { + if (value == wt.id) { return wt; } } diff --git a/java/src/main/java/org/rocksdb/WalFilter.java b/java/src/main/java/org/rocksdb/WalFilter.java index eac7b657f18d..3b04eafaca00 100644 --- a/java/src/main/java/org/rocksdb/WalFilter.java +++ b/java/src/main/java/org/rocksdb/WalFilter.java @@ -60,7 +60,6 @@ LogRecordFoundResult logRecordFound(final long logNumber, * LogFoundResult. */ class LogRecordFoundResult { - /** * Constant for continuing processing unchanged. 
*/ diff --git a/java/src/main/java/org/rocksdb/WriteBatch.java b/java/src/main/java/org/rocksdb/WriteBatch.java index 4fdeecd6c2f3..32bd1f6c458e 100644 --- a/java/src/main/java/org/rocksdb/WriteBatch.java +++ b/java/src/main/java/org/rocksdb/WriteBatch.java @@ -657,7 +657,8 @@ public long getContentFlags() { * Determines if {@link #clear()} was * called. * - * @return true if {@link #clear()} was called and the save point remains empty, false otherwise. + * @return true if {@link #clear()} was called and the save point remains empty, false + * otherwise. */ public boolean isCleared() { return (size | count | contentFlags) == 0; diff --git a/java/src/main/java/org/rocksdb/WriteBufferManager.java b/java/src/main/java/org/rocksdb/WriteBufferManager.java index fd2cf560819f..5856b498aded 100644 --- a/java/src/main/java/org/rocksdb/WriteBufferManager.java +++ b/java/src/main/java/org/rocksdb/WriteBufferManager.java @@ -28,12 +28,12 @@ public WriteBufferManager( this.allowStall_ = allowStall; } - /** - * Construct a new instance of WriteBufferManager. - * - * @param bufferSizeBytes the buffer size in bytes. - * @param cache the cache to use. - */ + /** + * Construct a new instance of WriteBufferManager. + * + * @param bufferSizeBytes the buffer size in bytes. + * @param cache the cache to use. + */ public WriteBufferManager(final long bufferSizeBytes, final Cache cache) { this(bufferSizeBytes, cache, false); } diff --git a/java/src/main/java/org/rocksdb/WriteStallCondition.java b/java/src/main/java/org/rocksdb/WriteStallCondition.java index c91310374b1d..2c011bc2e390 100644 --- a/java/src/main/java/org/rocksdb/WriteStallCondition.java +++ b/java/src/main/java/org/rocksdb/WriteStallCondition.java @@ -9,7 +9,6 @@ * Conditions that caused Write Stalls. */ public enum WriteStallCondition { - /** * Delayed. 
*/ diff --git a/java/src/main/java/org/rocksdb/util/BufferUtil.java b/java/src/main/java/org/rocksdb/util/BufferUtil.java index 4c23cfba86e5..dccf71ce2361 100644 --- a/java/src/main/java/org/rocksdb/util/BufferUtil.java +++ b/java/src/main/java/org/rocksdb/util/BufferUtil.java @@ -10,7 +10,6 @@ * Utility functions for working with buffers. */ public class BufferUtil { - /** * Check the bounds for an operation on a buffer. * diff --git a/java/src/main/java/org/rocksdb/util/ByteUtil.java b/java/src/main/java/org/rocksdb/util/ByteUtil.java index 8c32668ca8b9..b5139b86da32 100644 --- a/java/src/main/java/org/rocksdb/util/ByteUtil.java +++ b/java/src/main/java/org/rocksdb/util/ByteUtil.java @@ -33,7 +33,8 @@ public static byte[] bytes(final String str) { * is lexically greater than {@code y}. Note that lexical order is determined * as if comparing unsigned char arrays. *

- * Similar to memcmp.c. + * Similar to memcmp.c. * * @param x the first value to compare with * @param y the second value to compare against diff --git a/java/src/main/java/org/rocksdb/util/BytewiseComparator.java b/java/src/main/java/org/rocksdb/util/BytewiseComparator.java index fb32db3be0da..acb76c255b9e 100644 --- a/java/src/main/java/org/rocksdb/util/BytewiseComparator.java +++ b/java/src/main/java/org/rocksdb/util/BytewiseComparator.java @@ -22,7 +22,6 @@ * {@link org.rocksdb.BuiltinComparator#BYTEWISE_COMPARATOR} */ public final class BytewiseComparator extends AbstractComparator { - /** * Constructs a new BytewiseComparator. * diff --git a/java/src/main/java/org/rocksdb/util/Environment.java b/java/src/main/java/org/rocksdb/util/Environment.java index f6b2fa505657..f1bbcfe927e0 100644 --- a/java/src/main/java/org/rocksdb/util/Environment.java +++ b/java/src/main/java/org/rocksdb/util/Environment.java @@ -258,7 +258,6 @@ private static String getLibcPostfix() { return "-" + libcName; } - /** * Get the name as that of a JNI library. *

diff --git a/java/src/main/java/org/rocksdb/util/IntComparator.java b/java/src/main/java/org/rocksdb/util/IntComparator.java index cf3c6423f08c..142e81c3a21f 100644 --- a/java/src/main/java/org/rocksdb/util/IntComparator.java +++ b/java/src/main/java/org/rocksdb/util/IntComparator.java @@ -24,7 +24,6 @@ * or testing. */ public final class IntComparator extends AbstractComparator { - /** * Constructs an IntComparator. * diff --git a/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java b/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java index e145184eac6c..4d0708ca077a 100644 --- a/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java +++ b/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java @@ -23,7 +23,6 @@ * {@link BuiltinComparator#REVERSE_BYTEWISE_COMPARATOR} */ public final class ReverseBytewiseComparator extends AbstractComparator { - /** * Constructs a ReverseBytewiseComparator. * diff --git a/java/src/main/java/org/rocksdb/util/SizeUnit.java b/java/src/main/java/org/rocksdb/util/SizeUnit.java index 8582bb15436b..89938558f24d 100644 --- a/java/src/main/java/org/rocksdb/util/SizeUnit.java +++ b/java/src/main/java/org/rocksdb/util/SizeUnit.java @@ -9,7 +9,6 @@ * Simple factors of byte sizes. */ public interface SizeUnit { - /** * 1 Kilobyte. 
*/ diff --git a/java/src/test/java/org/rocksdb/SstFileManagerTest.java b/java/src/test/java/org/rocksdb/SstFileManagerTest.java index 96c5627096d8..1984b8ed35fb 100644 --- a/java/src/test/java/org/rocksdb/SstFileManagerTest.java +++ b/java/src/test/java/org/rocksdb/SstFileManagerTest.java @@ -47,7 +47,8 @@ public void trackedFiles() throws RocksDBException { @Test public void deleteRateBytesPerSecond() throws RocksDBException { try (final SstFileManager sstFileManager = new SstFileManager(Env.getDefault())) { - assertThat(sstFileManager.getDeleteRateBytesPerSecond()).isEqualTo(SstFileManager.DEFAULT_RATE_BYTES_PER_SEC); + assertThat(sstFileManager.getDeleteRateBytesPerSecond()) + .isEqualTo(SstFileManager.DEFAULT_RATE_BYTES_PER_SEC); final long ratePerSecond = 1024 * 1024 * 52; sstFileManager.setDeleteRateBytesPerSecond(ratePerSecond); assertThat(sstFileManager.getDeleteRateBytesPerSecond()).isEqualTo(ratePerSecond); @@ -57,7 +58,8 @@ public void deleteRateBytesPerSecond() throws RocksDBException { @Test public void maxTrashDBRatio() throws RocksDBException { try (final SstFileManager sstFileManager = new SstFileManager(Env.getDefault())) { - assertThat(sstFileManager.getMaxTrashDBRatio()).isEqualTo(SstFileManager.DEFAULT_MAX_TRASH_DB_RATIO); + assertThat(sstFileManager.getMaxTrashDBRatio()) + .isEqualTo(SstFileManager.DEFAULT_MAX_TRASH_DB_RATIO); final double trashRatio = 0.2; sstFileManager.setMaxTrashDBRatio(trashRatio); assertThat(sstFileManager.getMaxTrashDBRatio()).isEqualTo(trashRatio); diff --git a/java/src/test/java/org/rocksdb/util/ByteBufferAllocator.java b/java/src/test/java/org/rocksdb/util/ByteBufferAllocator.java index c3309d787878..90f301c39fc7 100644 --- a/java/src/test/java/org/rocksdb/util/ByteBufferAllocator.java +++ b/java/src/test/java/org/rocksdb/util/ByteBufferAllocator.java @@ -12,7 +12,6 @@ * Allocates for creating new buffers. */ public interface ByteBufferAllocator { - /** * Allocate a new ByteBuffer. 
* From a55e28c1ffac3f3ac461b4c94a3a3d10bcb5d4c6 Mon Sep 17 00:00:00 2001 From: Adam Retter Date: Thu, 21 Dec 2023 22:05:18 +0100 Subject: [PATCH 09/15] Fix last compilation issues --- java/src/main/java/org/rocksdb/AbstractRocksIterator.java | 2 +- java/src/main/java/org/rocksdb/Options.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/java/src/main/java/org/rocksdb/AbstractRocksIterator.java b/java/src/main/java/org/rocksdb/AbstractRocksIterator.java index 4c744dc82e33..556e47226074 100644 --- a/java/src/main/java/org/rocksdb/AbstractRocksIterator.java +++ b/java/src/main/java/org/rocksdb/AbstractRocksIterator.java @@ -118,7 +118,7 @@ public void refresh() throws RocksDBException { @Override public void refresh(final Snapshot snapshot) throws RocksDBException { assert (isOwningHandle()); - refresh1(nativeHandle_, snapshot.getNativeHandle()); + refresh1(nativeHandle_, snapshot.nativeHandle_); } @Override diff --git a/java/src/main/java/org/rocksdb/Options.java b/java/src/main/java/org/rocksdb/Options.java index af30279dbb81..c14a13570536 100644 --- a/java/src/main/java/org/rocksdb/Options.java +++ b/java/src/main/java/org/rocksdb/Options.java @@ -2120,7 +2120,7 @@ public List tablePropertiesCollectorFactory() { public void setTablePropertiesCollectorFactory(List factories) { long[] factoryHandlers = new long[factories.size()]; for (int i = 0; i < factoryHandlers.length; i++) { - factoryHandlers[i] = factories.get(i).getNativeHandle(); + factoryHandlers[i] = factories.get(i).nativeHandle_; } setTablePropertiesCollectorFactory(nativeHandle_, factoryHandlers); } From a2a34f22a84bbd1a368a01cea8b6be6abc1f0859 Mon Sep 17 00:00:00 2001 From: Adam Retter Date: Thu, 21 Dec 2023 22:25:49 +0100 Subject: [PATCH 10/15] Fix JavaDoc after rebase on main --- .../utilities/table_properties_collectors.h | 2 +- .../java/org/rocksdb/DBOptionsInterface.java | 4 +- .../rocksdb/MutableDBOptionsInterface.java | 4 +- java/src/main/java/org/rocksdb/Options.java | 3 
+- .../org/rocksdb/RocksIteratorInterface.java | 5 + .../TablePropertiesCollectorFactory.java | 109 +++++++++++------- .../main/java/org/rocksdb/Transaction.java | 3 +- .../test/java/org/rocksdb/OptionsTest.java | 3 +- 8 files changed, 82 insertions(+), 51 deletions(-) diff --git a/include/rocksdb/utilities/table_properties_collectors.h b/include/rocksdb/utilities/table_properties_collectors.h index c8c8af1de6a8..3395a7fd3998 100644 --- a/include/rocksdb/utilities/table_properties_collectors.h +++ b/include/rocksdb/utilities/table_properties_collectors.h @@ -83,7 +83,7 @@ class CompactOnDeletionCollectorFactory }; // Creates a factory of a table property collector that marks a SST -// file as need-compaction when it observe at least "D" deletion +// file as need-compaction when it observes at least "D" deletion // entries in any "N" consecutive entries, or the ratio of tombstone // entries >= deletion_ratio. // diff --git a/java/src/main/java/org/rocksdb/DBOptionsInterface.java b/java/src/main/java/org/rocksdb/DBOptionsInterface.java index f7a915eeabde..791cbd34c7f5 100644 --- a/java/src/main/java/org/rocksdb/DBOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/DBOptionsInterface.java @@ -1718,7 +1718,9 @@ T setEnableWriteThreadAdaptiveYield( * use "0:00-23:59". To make an entire day have no offpeak period, leave * this field blank. Default: Empty string (no offpeak). * - * @param offpeakTimeUTC String value from which to parse offpeak time range + * @param offpeakTimeUTC String value from which to parse offpeak time range. + * + * @return the instance of the current object. 
*/ T setDailyOffpeakTimeUTC(final String offpeakTimeUTC); diff --git a/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java b/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java index 304b1cc4cf2e..2971928a67e6 100644 --- a/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java +++ b/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java @@ -460,7 +460,9 @@ public interface MutableDBOptionsInterface tablePropertiesCollectorFactory() { * Set TablePropertiesCollectorFactory in underlying C++ object. * This method create its own copy of the list. Caller is responsible for * closing all the instances in the list. - * @param factories + * + * @param factories the collector factories. */ public void setTablePropertiesCollectorFactory(List factories) { long[] factoryHandlers = new long[factories.size()]; diff --git a/java/src/main/java/org/rocksdb/RocksIteratorInterface.java b/java/src/main/java/org/rocksdb/RocksIteratorInterface.java index 78f35e3f86a0..454ff95d6e62 100644 --- a/java/src/main/java/org/rocksdb/RocksIteratorInterface.java +++ b/java/src/main/java/org/rocksdb/RocksIteratorInterface.java @@ -133,6 +133,11 @@ public interface RocksIteratorInterface { /** * Similar to {@link #refresh()} but the iterator will be reading the latest DB state under the * given snapshot. + * + * @param snapshot the snapshot. + * + * @throws RocksDBException thrown if the operation is not supported or an error happens in the + * underlying native library */ void refresh(Snapshot snapshot) throws RocksDBException; } diff --git a/java/src/main/java/org/rocksdb/TablePropertiesCollectorFactory.java b/java/src/main/java/org/rocksdb/TablePropertiesCollectorFactory.java index ae2789ef8263..fbf521408017 100644 --- a/java/src/main/java/org/rocksdb/TablePropertiesCollectorFactory.java +++ b/java/src/main/java/org/rocksdb/TablePropertiesCollectorFactory.java @@ -1,44 +1,65 @@ -// Copyright (c) Meta Platforms, Inc. and affiliates. 
-// -// This source code is licensed under both the GPLv2 (found in the -// COPYING file in the root directory) and Apache 2.0 License -// (found in the LICENSE.Apache file in the root directory). - -package org.rocksdb; - -public abstract class TablePropertiesCollectorFactory extends RocksObject { - private TablePropertiesCollectorFactory(final long nativeHandle) { - super(nativeHandle); - } - - public static TablePropertiesCollectorFactory NewCompactOnDeletionCollectorFactory( - final long sliding_window_size, final long deletion_trigger, final double deletion_ratio) { - long handle = - newCompactOnDeletionCollectorFactory(sliding_window_size, deletion_trigger, deletion_ratio); - return new TablePropertiesCollectorFactory(handle) { - @Override - protected void disposeInternal(long handle) { - TablePropertiesCollectorFactory.deleteCompactOnDeletionCollectorFactory(handle); - } - }; - } - - /** - * Internal API. Do not use. - * @param nativeHandle - * @return - */ - static TablePropertiesCollectorFactory newWrapper(final long nativeHandle) { - return new TablePropertiesCollectorFactory(nativeHandle) { - @Override - protected void disposeInternal(long handle) { - TablePropertiesCollectorFactory.deleteCompactOnDeletionCollectorFactory(handle); - } - }; - } - - private static native long newCompactOnDeletionCollectorFactory( - final long slidingWindowSize, final long deletionTrigger, final double deletionRatio); - - private static native void deleteCompactOnDeletionCollectorFactory(final long handle); -} +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Table Properties Collector Factory. 
+ */ +public abstract class TablePropertiesCollectorFactory extends RocksObject { + private TablePropertiesCollectorFactory(final long nativeHandle) { + super(nativeHandle); + } + + /** + * Creates a factory of a table property collector that marks a SST + * file as need-compaction when it observes at least "D" deletion + * entries in any "N" consecutive entries, or the ratio of tombstone + * entries >= deletion_ratio. + * + * @param slidingWindowSize "N".Note that this number will be + * round up to the smallest multiple of 128 that is no less + * than the specified size. + * @param deletionTrigger "D". Note that even when "N" is changed, + * the specified number for "D" will not be changed. + * @param deletionRatio if <= 0 or > 1, disable triggering compaction + * based on deletion ratio. Disabled by default. + * + * @return the new compact on deletion collector factory. + */ + public static TablePropertiesCollectorFactory createNewCompactOnDeletionCollectorFactory( + final long slidingWindowSize, final long deletionTrigger, final double deletionRatio) { + final long handle = + newCompactOnDeletionCollectorFactory(slidingWindowSize, deletionTrigger, deletionRatio); + return new TablePropertiesCollectorFactory(handle) { + @Override + protected void disposeInternal(final long handle) { + TablePropertiesCollectorFactory.deleteCompactOnDeletionCollectorFactory(handle); + } + }; + } + + /** + * Internal API. Do not use. + * + * @param nativeHandle the native handle to wrap. + * + * @return the new TablePropertiesCollectorFactory. 
+ */ + static TablePropertiesCollectorFactory newWrapper(final long nativeHandle) { + return new TablePropertiesCollectorFactory(nativeHandle) { + @Override + protected void disposeInternal(long handle) { + TablePropertiesCollectorFactory.deleteCompactOnDeletionCollectorFactory(handle); + } + }; + } + + private static native long newCompactOnDeletionCollectorFactory( + final long slidingWindowSize, final long deletionTrigger, final double deletionRatio); + + private static native void deleteCompactOnDeletionCollectorFactory(final long handle); +} diff --git a/java/src/main/java/org/rocksdb/Transaction.java b/java/src/main/java/org/rocksdb/Transaction.java index 7b1fc532a2df..827d2e6aa8e6 100644 --- a/java/src/main/java/org/rocksdb/Transaction.java +++ b/java/src/main/java/org/rocksdb/Transaction.java @@ -1647,8 +1647,7 @@ public void put(final ColumnFamilyHandle columnFamilyHandle, final ByteBuffer ke * {@link Status.Code#Busy} if there is a write conflict, * {@link Status.Code#TimedOut} if a lock could not be acquired, * {@link Status.Code#TryAgain} if the memtable history size is not large - * enough. See - * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()} + * enough. * * @param columnFamilyHandle The column family to put the key/value into * @param key the specified key to be inserted. 
diff --git a/java/src/test/java/org/rocksdb/OptionsTest.java b/java/src/test/java/org/rocksdb/OptionsTest.java index c78d0f76b3a4..d8e17e14ad56 100644 --- a/java/src/test/java/org/rocksdb/OptionsTest.java +++ b/java/src/test/java/org/rocksdb/OptionsTest.java @@ -1469,7 +1469,8 @@ public void onMemTableSealed(final MemTableInfo memTableInfo) { public void tablePropertiesCollectorFactory() { try (final Options options = new Options()) { try (TablePropertiesCollectorFactory collectorFactory = - TablePropertiesCollectorFactory.NewCompactOnDeletionCollectorFactory(10, 10, 1.0)) { + TablePropertiesCollectorFactory.createNewCompactOnDeletionCollectorFactory( + 10, 10, 1.0)) { List factories = Arrays.asList(collectorFactory); options.setTablePropertiesCollectorFactory(factories); } From 9677a948fedbf15685850fc4effda2af2a440fe0 Mon Sep 17 00:00:00 2001 From: Adam Retter Date: Thu, 21 Dec 2023 23:15:55 +0100 Subject: [PATCH 11/15] Fix PMD violations --- .../main/java/org/rocksdb/CompressionOptions.java | 2 +- java/src/main/java/org/rocksdb/KeyMayExist.java | 5 ++--- java/src/main/java/org/rocksdb/LiveFileMetaData.java | 1 + java/src/main/java/org/rocksdb/util/SizeUnit.java | 12 ++++++------ 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/java/src/main/java/org/rocksdb/CompressionOptions.java b/java/src/main/java/org/rocksdb/CompressionOptions.java index 138634c47a1d..acc11863d02c 100644 --- a/java/src/main/java/org/rocksdb/CompressionOptions.java +++ b/java/src/main/java/org/rocksdb/CompressionOptions.java @@ -13,7 +13,7 @@ public class CompressionOptions extends RocksObject { * RocksDB's generic default compression level. Internally it'll be translated * to the default compression level specific to the library being used. */ - public static final int DEFAULT_COMPRESSION_LEVEL = 32767; + public static final int DEFAULT_COMPRESSION_LEVEL = 32_767; /** * Constructs a new CompressionOptions. 
diff --git a/java/src/main/java/org/rocksdb/KeyMayExist.java b/java/src/main/java/org/rocksdb/KeyMayExist.java index a87af6a05a7d..60317d264fd7 100644 --- a/java/src/main/java/org/rocksdb/KeyMayExist.java +++ b/java/src/main/java/org/rocksdb/KeyMayExist.java @@ -5,7 +5,6 @@ package org.rocksdb; -import java.nio.ByteBuffer; import java.util.Objects; /** @@ -28,8 +27,8 @@ public int hashCode() { } /** - * Part of the return type from {@link RocksDB#keyMayExist(ColumnFamilyHandle, ByteBuffer, - * ByteBuffer)}. + * Part of the return type from {@link RocksDB#keyMayExist(ColumnFamilyHandle, + * java.nio.ByteBuffer, java.nio.ByteBuffer)}. */ public enum KeyMayExistEnum { /** diff --git a/java/src/main/java/org/rocksdb/LiveFileMetaData.java b/java/src/main/java/org/rocksdb/LiveFileMetaData.java index 9cac783eeb20..a15a5737489c 100644 --- a/java/src/main/java/org/rocksdb/LiveFileMetaData.java +++ b/java/src/main/java/org/rocksdb/LiveFileMetaData.java @@ -46,6 +46,7 @@ public int level() { return level; } + @SuppressWarnings("PMD.UnusedPrivateMethod") private long newLiveFileMetaDataHandle() { return newLiveFileMetaDataHandle(columnFamilyName(), columnFamilyName().length, level(), fileName(), path(), size(), smallestSeqno(), largestSeqno(), smallestKey(), diff --git a/java/src/main/java/org/rocksdb/util/SizeUnit.java b/java/src/main/java/org/rocksdb/util/SizeUnit.java index 89938558f24d..a6205eba266c 100644 --- a/java/src/main/java/org/rocksdb/util/SizeUnit.java +++ b/java/src/main/java/org/rocksdb/util/SizeUnit.java @@ -8,29 +8,29 @@ /** * Simple factors of byte sizes. */ -public interface SizeUnit { +public class SizeUnit { /** * 1 Kilobyte. */ - long KB = 1024L; + public static final long KB = 1024L; /** * 1 Megabyte. */ - long MB = KB * KB; + public static final long MB = KB * KB; /** * 1 Gigabyte. */ - long GB = KB * MB; + public static final long GB = KB * MB; /** * 1 Terabyte. */ - long TB = KB * GB; + public static final long TB = KB * GB; /** * 1 Petabyte. 
*/ - long PB = KB * TB; + public static final long PB = KB * TB; } From 8214d3ab0d6c09f85cfdb40248fc2861a0196ca8 Mon Sep 17 00:00:00 2001 From: Adam Retter Date: Fri, 22 Dec 2023 00:07:44 +0100 Subject: [PATCH 12/15] Change English to American-English --- .../java/org/rocksdb/CompactRangeOptions.java | 20 +++++++++---------- .../org/rocksdb/CompactRangeOptionsTest.java | 18 ++++++++--------- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/java/src/main/java/org/rocksdb/CompactRangeOptions.java b/java/src/main/java/org/rocksdb/CompactRangeOptions.java index 710c9614caa7..3df1939f9ee3 100644 --- a/java/src/main/java/org/rocksdb/CompactRangeOptions.java +++ b/java/src/main/java/org/rocksdb/CompactRangeOptions.java @@ -293,24 +293,24 @@ public Timestamp fullHistoryTSLow() { } /** - * Set cancelled. + * Set canceled. * - * @param cancelled true to cancel, otherwise false. + * @param canceled true to cancel, otherwise false. * * @return This CompactRangeOptions. */ - public CompactRangeOptions setCancelled(final boolean cancelled) { - setCancelled(nativeHandle_, cancelled); + public CompactRangeOptions setCanceled(final boolean canceled) { + setCanceled(nativeHandle_, canceled); return this; } /** - * Get the cancelled status. + * Get the canceled status. * - * @return true if cancelled, false otherwise. + * @return true if canceled, false otherwise. 
*/ - public boolean cancelled() { - return cancelled(nativeHandle_); + public boolean canceled() { + return canceled(nativeHandle_); } private static native long newCompactRangeOptions(); @@ -342,7 +342,7 @@ private static native void setFullHistoryTSLow( private static native Timestamp fullHistoryTSLow(final long handle); - private static native void setCancelled(final long handle, final boolean canceled); + private static native void setCanceled(final long handle, final boolean canceled); - private static native boolean cancelled(final long handle); + private static native boolean canceled(final long handle); } diff --git a/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java b/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java index 9c6689ea8906..549b74beb1cc 100644 --- a/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java +++ b/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java @@ -125,14 +125,14 @@ public void fullHistoryTSLowDefault() { @Test public void canceled() { CompactRangeOptions opt = new CompactRangeOptions(); - assertThat(opt.cancelled()).isEqualTo(false); - opt.setCancelled(true); - assertThat(opt.cancelled()).isEqualTo(true); - opt.setCancelled(false); - assertThat(opt.cancelled()).isEqualTo(false); - opt.setCancelled(true); - assertThat(opt.cancelled()).isEqualTo(true); - opt.setCancelled(true); - assertThat(opt.cancelled()).isEqualTo(true); + assertThat(opt.canceled()).isEqualTo(false); + opt.setCanceled(true); + assertThat(opt.canceled()).isEqualTo(true); + opt.setCanceled(false); + assertThat(opt.canceled()).isEqualTo(false); + opt.setCanceled(true); + assertThat(opt.canceled()).isEqualTo(true); + opt.setCanceled(true); + assertThat(opt.canceled()).isEqualTo(true); } } From 6b58f010efa513201959cdc372e1d55fb95dfab2 Mon Sep 17 00:00:00 2001 From: Adam Retter Date: Fri, 22 Dec 2023 00:33:06 +0100 Subject: [PATCH 13/15] Add missing RocksJava tests to Makefile, and order tests alphabetically --- java/Makefile | 50 
+++++++++++++++++++++++++++----------------------- 1 file changed, 27 insertions(+), 23 deletions(-) diff --git a/java/Makefile b/java/Makefile index 70c0d7f1b007..1ef540915c40 100644 --- a/java/Makefile +++ b/java/Makefile @@ -115,12 +115,11 @@ JAVA_TESTS = \ org.rocksdb.BuiltinComparatorTest\ org.rocksdb.ByteBufferUnsupportedOperationTest\ org.rocksdb.BytewiseComparatorRegressionTest\ - org.rocksdb.util.BytewiseComparatorTest\ - org.rocksdb.util.BytewiseComparatorIntTest\ org.rocksdb.CheckPointTest\ org.rocksdb.ClockCacheTest\ org.rocksdb.ColumnFamilyOptionsTest\ org.rocksdb.ColumnFamilyTest\ + org.rocksdb.CompactRangeOptionsTest\ org.rocksdb.CompactionFilterFactoryTest\ org.rocksdb.CompactionJobInfoTest\ org.rocksdb.CompactionJobStatsTest\ @@ -132,21 +131,21 @@ JAVA_TESTS = \ org.rocksdb.ComparatorOptionsTest\ org.rocksdb.CompressionOptionsTest\ org.rocksdb.CompressionTypesTest\ + org.rocksdb.ConcurrentTaskLimiterTest\ + org.rocksdb.DefaultEnvTest\ org.rocksdb.DBOptionsTest\ org.rocksdb.DirectSliceTest\ - org.rocksdb.util.EnvironmentTest\ org.rocksdb.EnvOptionsTest\ org.rocksdb.EventListenerTest\ - org.rocksdb.IngestExternalFileOptionsTest\ - org.rocksdb.util.IntComparatorTest\ - org.rocksdb.util.JNIComparatorTest\ org.rocksdb.FilterTest\ + org.rocksdb.FlushOptionsTest\ org.rocksdb.FlushTest\ + org.rocksdb.HyperClockCacheTest\ org.rocksdb.ImportColumnFamilyTest\ org.rocksdb.InfoLogLevelTest\ + org.rocksdb.IngestExternalFileOptionsTest\ org.rocksdb.KeyExistsTest \ org.rocksdb.KeyMayExistTest\ - org.rocksdb.ConcurrentTaskLimiterTest\ org.rocksdb.LoggerTest\ org.rocksdb.LRUCacheTest\ org.rocksdb.MemoryUtilTest\ @@ -154,10 +153,10 @@ JAVA_TESTS = \ org.rocksdb.MergeCFVariantsTest\ org.rocksdb.MergeTest\ org.rocksdb.MergeVariantsTest\ - org.rocksdb.MultiColumnRegressionTest \ + org.rocksdb.MixedOptionsTest\ + org.rocksdb.MultiColumnRegressionTest\ org.rocksdb.MultiGetManyKeysTest\ org.rocksdb.MultiGetTest\ - org.rocksdb.MixedOptionsTest\ 
org.rocksdb.MutableColumnFamilyOptionsTest\ org.rocksdb.MutableDBOptionsTest\ org.rocksdb.MutableOptionsGetSetTest \ @@ -166,48 +165,53 @@ JAVA_TESTS = \ org.rocksdb.OptimisticTransactionTest\ org.rocksdb.OptimisticTransactionDBTest\ org.rocksdb.OptimisticTransactionOptionsTest\ - org.rocksdb.OptionsUtilTest\ org.rocksdb.OptionsTest\ - org.rocksdb.PerfLevelTest \ + org.rocksdb.OptionsUtilTest\ org.rocksdb.PerfContextTest \ + org.rocksdb.PerfLevelTest \ + org.rocksdb.PlainTableConfigTest\ org.rocksdb.PutCFVariantsTest\ + org.rocksdb.PutMultiplePartsTest\ org.rocksdb.PutVariantsTest\ - org.rocksdb.PlainTableConfigTest\ org.rocksdb.RateLimiterTest\ org.rocksdb.ReadOnlyTest\ org.rocksdb.ReadOptionsTest\ - org.rocksdb.util.ReverseBytewiseComparatorIntTest\ - org.rocksdb.RocksDBTest\ org.rocksdb.RocksDBExceptionTest\ - org.rocksdb.DefaultEnvTest\ + org.rocksdb.RocksDBTest\ org.rocksdb.RocksIteratorTest\ org.rocksdb.RocksMemEnvTest\ - org.rocksdb.util.SizeUnitTest\ org.rocksdb.SecondaryDBTest\ org.rocksdb.SliceTest\ org.rocksdb.SnapshotTest\ org.rocksdb.SstFileManagerTest\ - org.rocksdb.SstFileWriterTest\ org.rocksdb.SstFileReaderTest\ + org.rocksdb.SstFileWriterTest\ org.rocksdb.SstPartitionerTest\ + org.rocksdb.StatisticsCollectorTest\ + org.rocksdb.StatisticsTest\ org.rocksdb.TableFilterTest\ org.rocksdb.TimedEnvTest\ - org.rocksdb.TransactionTest\ - org.rocksdb.TransactionDBTest\ - org.rocksdb.TransactionOptionsTest\ org.rocksdb.TransactionDBOptionsTest\ + org.rocksdb.TransactionDBTest\ org.rocksdb.TransactionLogIteratorTest\ + org.rocksdb.TransactionOptionsTest\ + org.rocksdb.TransactionTest\ org.rocksdb.TtlDBTest\ - org.rocksdb.StatisticsTest\ - org.rocksdb.StatisticsCollectorTest\ org.rocksdb.VerifyChecksumsTest\ org.rocksdb.WalFilterTest\ org.rocksdb.WALRecoveryModeTest\ org.rocksdb.WriteBatchHandlerTest\ org.rocksdb.WriteBatchTest\ org.rocksdb.WriteBatchThreadedTest\ - org.rocksdb.WriteOptionsTest\ org.rocksdb.WriteBatchWithIndexTest\ + 
org.rocksdb.WriteOptionsTest\ + org.rocksdb.util.BytewiseComparatorIntTest\ + org.rocksdb.util.BytewiseComparatorTest\ + org.rocksdb.util.EnvironmentTest\ + org.rocksdb.util.IntComparatorTest\ + org.rocksdb.util.JNIComparatorTest\ + org.rocksdb.util.ReverseBytewiseComparatorIntTest\ + org.rocksdb.util.SizeUnitTest\ org.rocksdb.util.StdErrLoggerTest MAIN_SRC = src/main/java From 4e47bb77868c4c1ddec1fd4d624441d6e5802f8a Mon Sep 17 00:00:00 2001 From: Adam Retter Date: Tue, 20 Feb 2024 22:19:31 +0100 Subject: [PATCH 14/15] Implement missing method --- java/src/main/java/org/rocksdb/util/StdErrLogger.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/java/src/main/java/org/rocksdb/util/StdErrLogger.java b/java/src/main/java/org/rocksdb/util/StdErrLogger.java index 00b08d384522..656535c3ca83 100644 --- a/java/src/main/java/org/rocksdb/util/StdErrLogger.java +++ b/java/src/main/java/org/rocksdb/util/StdErrLogger.java @@ -47,6 +47,11 @@ public LoggerType getLoggerType() { return LoggerType.STDERR_IMPLEMENTATION; } + @Override + public long getNativeHandle() { + return nativeHandle_; + } + private static native long newStdErrLogger( final byte logLevel, /* @Nullable */ final String logPrefix); private static native void setInfoLogLevel(final long handle, final byte logLevel); From b542018426942e51aca2d6aa16c510ef6ebad8b4 Mon Sep 17 00:00:00 2001 From: Adam Retter Date: Wed, 10 Jan 2024 12:52:09 +0100 Subject: [PATCH 15/15] Ran 'make format' --- .../rocksdb/HashLinkedListMemTableConfig.java | 3 +- .../main/java/org/rocksdb/HistogramType.java | 4 +- java/src/main/java/org/rocksdb/Logger.java | 6 +- .../java/org/rocksdb/MutableDBOptions.java | 3 +- .../src/main/java/org/rocksdb/TickerType.java | 2472 +++++++++-------- 5 files changed, 1247 insertions(+), 1241 deletions(-) diff --git a/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java b/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java index 509441ddf81c..9d490a017853 100644 --- 
a/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java +++ b/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java @@ -33,8 +33,7 @@ public class HashLinkedListMemTableConfig extends MemTableConfig { /** * The default of whether to log when a bucket is flushed. */ - public static final boolean - DEFAULT_IF_LOG_BUCKET_DIST_WHEN_FLUSH = true; + public static final boolean DEFAULT_IF_LOG_BUCKET_DIST_WHEN_FLUSH = true; /** * The default threshold for determining when to use a Skip List. diff --git a/java/src/main/java/org/rocksdb/HistogramType.java b/java/src/main/java/org/rocksdb/HistogramType.java index d503e75154b4..ea9523e0bd93 100644 --- a/java/src/main/java/org/rocksdb/HistogramType.java +++ b/java/src/main/java/org/rocksdb/HistogramType.java @@ -9,7 +9,6 @@ * The types of histogram. */ public enum HistogramType { - /** * DB Get. */ @@ -354,7 +353,8 @@ public enum HistogramType { ASYNC_PREFETCH_ABORT_MICROS((byte) 0x3C), /** - * Number of bytes read for RocksDB's prefetching contents (as opposed to file system's prefetch) from the end of SST table during block based table open. + * Number of bytes read for RocksDB's prefetching contents (as opposed to file system's prefetch) + * from the end of SST table during block based table open. */ TABLE_OPEN_PREFETCH_TAIL_READ_BYTES((byte) 0x3D), diff --git a/java/src/main/java/org/rocksdb/Logger.java b/java/src/main/java/org/rocksdb/Logger.java index e967bea33fa3..42cc2e2057d2 100644 --- a/java/src/main/java/org/rocksdb/Logger.java +++ b/java/src/main/java/org/rocksdb/Logger.java @@ -110,8 +110,7 @@ public final LoggerType getLoggerType() { * @param logLevel the log level. * @param logMsg the log message. */ - protected abstract void log(final InfoLogLevel logLevel, - final String logMsg); + protected abstract void log(final InfoLogLevel logLevel, final String logMsg); /** * Create a new Logger with Options. 
@@ -128,8 +127,7 @@ protected abstract void log(final InfoLogLevel logLevel, * @param handle the native handle to the underlying C++ native Logger object. * @param logLevel the log level. */ - protected native void setInfoLogLevel(final long handle, - final byte logLevel); + protected native void setInfoLogLevel(final long handle, final byte logLevel); /** * Get the log level. diff --git a/java/src/main/java/org/rocksdb/MutableDBOptions.java b/java/src/main/java/org/rocksdb/MutableDBOptions.java index 2913e6c71d00..07d4624b6035 100644 --- a/java/src/main/java/org/rocksdb/MutableDBOptions.java +++ b/java/src/main/java/org/rocksdb/MutableDBOptions.java @@ -160,7 +160,8 @@ public enum DBOption implements MutableDBOptionKey { compaction_readahead_size(ValueType.LONG), /** - * Signifies periods characterized by significantly less read and write activity compared to other times. + * Signifies periods characterized by significantly less read and write activity compared to + * other times. */ daily_offpeak_time_utc(ValueType.STRING); diff --git a/java/src/main/java/org/rocksdb/TickerType.java b/java/src/main/java/org/rocksdb/TickerType.java index 70c2def2525b..c3d7213b261d 100644 --- a/java/src/main/java/org/rocksdb/TickerType.java +++ b/java/src/main/java/org/rocksdb/TickerType.java @@ -16,1238 +16,1246 @@ * should descend into negative values until TICKER_ENUM_MAX reaches -128 (-0x80). */ public enum TickerType { - - /** - * total block cache misses - *

- * REQUIRES: BLOCK_CACHE_MISS == BLOCK_CACHE_INDEX_MISS + - * BLOCK_CACHE_FILTER_MISS + - * BLOCK_CACHE_DATA_MISS; - */ - BLOCK_CACHE_MISS((byte) 0x0), - - /** - * total block cache hit - *

- * REQUIRES: BLOCK_CACHE_HIT == BLOCK_CACHE_INDEX_HIT + - * BLOCK_CACHE_FILTER_HIT + - * BLOCK_CACHE_DATA_HIT; - */ - BLOCK_CACHE_HIT((byte) 0x1), - - BLOCK_CACHE_ADD((byte) 0x2), - - /** - * Number of failures when adding blocks to block cache. - */ - BLOCK_CACHE_ADD_FAILURES((byte) 0x3), - - /** - * Number of times cache miss when accessing index block from block cache. - */ - BLOCK_CACHE_INDEX_MISS((byte) 0x4), - - /** - * Number of times cache hit when accessing index block from block cache. - */ - BLOCK_CACHE_INDEX_HIT((byte) 0x5), - - /** - * Number of index blocks added to block cache. - */ - BLOCK_CACHE_INDEX_ADD((byte) 0x6), - - /** - * Number of bytes of index blocks inserted into cache - */ - BLOCK_CACHE_INDEX_BYTES_INSERT((byte) 0x7), - - /** - * Number of times cache miss when accessing filter block from block cache. - */ - BLOCK_CACHE_FILTER_MISS((byte) 0x8), - - /** - * Number of times cache hit when accessing filter block from block cache. - */ - BLOCK_CACHE_FILTER_HIT((byte) 0x9), - - /** - * Number of filter blocks added to block cache. - */ - BLOCK_CACHE_FILTER_ADD((byte) 0xA), - - /** - * Number of bytes of bloom filter blocks inserted into cache - */ - BLOCK_CACHE_FILTER_BYTES_INSERT((byte) 0xB), - - /** - * Number of times cache miss when accessing data block from block cache. - */ - BLOCK_CACHE_DATA_MISS((byte) 0xC), - - /** - * Number of times cache hit when accessing data block from block cache. - */ - BLOCK_CACHE_DATA_HIT((byte) 0xD), - - /** - * Number of data blocks added to block cache. - */ - BLOCK_CACHE_DATA_ADD((byte) 0xE), - - /** - * Number of bytes of data blocks inserted into cache - */ - BLOCK_CACHE_DATA_BYTES_INSERT((byte) 0xF), - - /** - * Number of bytes read from cache. - */ - BLOCK_CACHE_BYTES_READ((byte) 0x10), - - /** - * Number of bytes written into cache. - */ - BLOCK_CACHE_BYTES_WRITE((byte) 0x11), - - /** - * Number of Block cache Compression dictionary misses. 
- */ - BLOCK_CACHE_COMPRESSION_DICT_MISS((byte) 0x12), - - /** - * Number of Block cache Compression dictionary hits. - */ - BLOCK_CACHE_COMPRESSION_DICT_HIT((byte) 0x13), - - /** - * Number of Block cache Compression dictionary additions. - */ - BLOCK_CACHE_COMPRESSION_DICT_ADD((byte) 0x14), - - /** - * Number of Block cache Compression dictionary bytes inserted. - */ - BLOCK_CACHE_COMPRESSION_DICT_BYTES_INSERT((byte) 0x15), - - /** - * Redundant additions to block cache. - */ - BLOCK_CACHE_ADD_REDUNDANT((byte) 0x16), - - /** - * Redundant additions to block cache index. - */ - BLOCK_CACHE_INDEX_ADD_REDUNDANT((byte) 0x17), - - /** - * Redundant additions to block cache filter. - */ - BLOCK_CACHE_FILTER_ADD_REDUNDANT((byte) 0x18), - - /** - * Redundant additions to block cache data. - */ - BLOCK_CACHE_DATA_ADD_REDUNDANT((byte) 0x19), - - /** - * Redundant additions to block cache compression dictionary. - */ - BLOCK_CACHE_COMPRESSION_DICT_ADD_REDUNDANT((byte) 0x1A), - - /** - * Number of secondary cache hits. - */ - SECONDARY_CACHE_HITS((byte) 0x1B), - - /** - * Number of secondary cache filter hits. - */ - SECONDARY_CACHE_FILTER_HITS((byte) 0x1C), - - /** - * Number of secondary cache index hits. - */ - SECONDARY_CACHE_INDEX_HITS((byte) 0x1D), - - /** - * Number of secondary cache data hits. - */ - SECONDARY_CACHE_DATA_HITS((byte) 0x1E), - - /** - * Number of compressed secondary cache dummy hits. - */ - COMPRESSED_SECONDARY_CACHE_DUMMY_HITS((byte) 0x1F), - - /** - * Number of compressed secondary cache hits. - */ - COMPRESSED_SECONDARY_CACHE_HITS((byte) 0x20), - - /** - * Number of compressed secondary cache promotions. - */ - COMPRESSED_SECONDARY_CACHE_PROMOTIONS((byte) 0x21), - - /** - * Number of compressed secondary cache promotion skips. - */ - COMPRESSED_SECONDARY_CACHE_PROMOTION_SKIPS((byte) 0x22), - - /** - * Number of times bloom filter has avoided file reads. 
- */ - BLOOM_FILTER_USEFUL((byte) 0x23), - - /** - * Number of times bloom FullFilter has not avoided the reads. - */ - BLOOM_FILTER_FULL_POSITIVE((byte) 0x24), - - /** - * Number of times bloom FullFilter has not avoided the reads and data actually - * exist. - */ - BLOOM_FILTER_FULL_TRUE_POSITIVE((byte) 0x25), - - /** - * Number of times bloom was checked before creating iterator on a file. - */ - BLOOM_FILTER_PREFIX_CHECKED((byte) 0x26), - - /** - * Number of times it was useful (in avoiding iterator creation) that bloom was checked before creating iterator on a file. - */ - BLOOM_FILTER_PREFIX_USEFUL((byte) 0x27), - - /** - * Number of times bloom produced a true positive result. - */ - BLOOM_FILTER_PREFIX_TRUE_POSITIVE((byte) 0x28), - - /** - * Number of persistent cache hit - */ - PERSISTENT_CACHE_HIT((byte) 0x29), - - /** - * Number of persistent cache miss - */ - PERSISTENT_CACHE_MISS((byte) 0x2A), - - /** - * Number of total simulation block cache hits - */ - SIM_BLOCK_CACHE_HIT((byte) 0x2B), - - /** - * Number of total simulation block cache misses - */ - SIM_BLOCK_CACHE_MISS((byte) 0x2C), - - /** - * Number of memtable hits. - */ - MEMTABLE_HIT((byte) 0x2D), - - /** - * Number of of memtable misses. - */ - MEMTABLE_MISS((byte) 0x2E), - - /** - * Number of Get() queries served by L0 - */ - GET_HIT_L0((byte) 0x2F), - - /** - * Number of Get() queries served by L1 - */ - GET_HIT_L1((byte) 0x30), - - /** - * Number of Get() queries served by L2 and up - */ - GET_HIT_L2_AND_UP((byte) 0x31), - - /** - * COMPACTION_KEY_DROP_* count the reasons for key drop during compaction - * There are 4 reasons currently. - */ - - /** - * key was written with a newer value. - */ - COMPACTION_KEY_DROP_NEWER_ENTRY((byte) 0x32), - - /** - * Also includes keys dropped for range del. - * The key is obsolete. - */ - COMPACTION_KEY_DROP_OBSOLETE((byte) 0x33), - - /** - * key was covered by a range tombstone. 
- */ - COMPACTION_KEY_DROP_RANGE_DEL((byte) 0x34), - - /** - * User compaction function has dropped the key. - */ - COMPACTION_KEY_DROP_USER((byte) 0x35), - - /** - * all keys in range were deleted. - */ - COMPACTION_RANGE_DEL_DROP_OBSOLETE((byte) 0x36), - - /** - * Deletions obsoleted before bottom level due to file gap optimization. - */ - COMPACTION_OPTIMIZED_DEL_DROP_OBSOLETE((byte) 0x37), - - /** - * Compactions cancelled to prevent ENOSPC - */ - COMPACTION_CANCELLED((byte) 0x38), - - /** - * Number of keys written to the database via the Put and Write call's. - */ - NUMBER_KEYS_WRITTEN((byte) 0x39), - - /** - * Number of Keys read. - */ - NUMBER_KEYS_READ((byte) 0x3A), - - /** - * Number keys updated, if inplace update is enabled - */ - NUMBER_KEYS_UPDATED((byte) 0x3B), - - /** - * The number of uncompressed bytes issued by DB::Put(), DB::Delete(),\ - * DB::Merge(), and DB::Write(). - */ - BYTES_WRITTEN((byte) 0x3C), - - /** - * The number of uncompressed bytes read from DB::Get(). It could be - * either from memtables, cache, or table files. - * - * For the number of logical bytes read from DB::MultiGet(), - * please use {@link #NUMBER_MULTIGET_BYTES_READ}. - */ - BYTES_READ((byte) 0x3D), - - /** - * The number of calls to seek. - */ - NUMBER_DB_SEEK((byte) 0x3E), - - /** - * The number of calls to next. - */ - NUMBER_DB_NEXT((byte) 0x3F), - - /** - * The number of calls to prev. - */ - NUMBER_DB_PREV((byte) 0x40), - - /** - * The number of calls to seek that returned data. - */ - NUMBER_DB_SEEK_FOUND((byte) 0x41), - - /** - * The number of calls to next that returned data. - */ - NUMBER_DB_NEXT_FOUND((byte) 0x42), - - /** - * The number of calls to prev that returned data. - */ - NUMBER_DB_PREV_FOUND((byte) 0x43), - - /** - * The number of uncompressed bytes read from an iterator. - * Includes size of key and value. 
- */ - ITER_BYTES_READ((byte) 0x44), - - /** - * Number of internal skipped during iteration - */ - NUMBER_ITER_SKIP((byte) 0x45), - - /** - * Number of times we had to reseek inside an iteration to skip - * over large number of keys with same userkey. - */ - NUMBER_OF_RESEEKS_IN_ITERATION((byte) 0x46), - - /** - * Number of iterators created. - */ - NO_ITERATOR_CREATED((byte) 0x47), - - /** - * Number of iterators deleted. - */ - NO_ITERATOR_DELETED((byte) 0x48), - - /** - * Number of file opens. - */ - NO_FILE_OPENS((byte) 0x49), - - /** - * Number of file errors. - */ - NO_FILE_ERRORS((byte) 0x4A), - - /** - * Writer has to wait for compaction or flush to finish. - */ - STALL_MICROS((byte) 0x4B), - - /** - * The wait time for db mutex. - * - * Disabled by default. To enable it set stats level to {@link StatsLevel#ALL} - */ - DB_MUTEX_WAIT_MICROS((byte) 0x4C), - - /** - * Number of MultiGet calls. - */ - NUMBER_MULTIGET_CALLS((byte) 0x4D), - - /** - * Number of MultiGet keys read. - */ - NUMBER_MULTIGET_KEYS_READ((byte) 0x4E), - - /** - * Number of MultiGet bytes read. - */ - NUMBER_MULTIGET_BYTES_READ((byte) 0x4F), - - /** - * Number of MultiGet keys found (vs number requested) - */ - NUMBER_MULTIGET_KEYS_FOUND((byte) 0x50), - - /** - * Number of Merge failures. - */ - NUMBER_MERGE_FAILURES((byte) 0x51), - - /** - * Record the number of calls to {@link RocksDB#getUpdatesSince(long)}. Useful to keep track of - * transaction log iterator refreshes. - */ - GET_UPDATES_SINCE_CALLS((byte) 0x52), - - /** - * Number of times WAL sync is done. - */ - WAL_FILE_SYNCED((byte) 0x53), - - /** - * Number of bytes written to WAL. - */ - WAL_FILE_BYTES((byte) 0x54), - - /** - * Writes can be processed by requesting thread or by the thread at the - * head of the writers queue. - */ - WRITE_DONE_BY_SELF((byte) 0x55), - - /** - * Equivalent to writes done for others. - */ - WRITE_DONE_BY_OTHER((byte) 0x56), - - /** - * Number of Write calls that request WAL. 
- */ - WRITE_WITH_WAL((byte) 0x57), - - /** - * Bytes read during compaction. - */ - COMPACT_READ_BYTES((byte) 0x58), - - /** - * Bytes written during compaction. - */ - COMPACT_WRITE_BYTES((byte) 0x59), - - /** - * Bytes written during flush. - */ - FLUSH_WRITE_BYTES((byte) 0x5A), - - /** - * Compaction read bytes marked. - */ - COMPACT_READ_BYTES_MARKED((byte) 0x5B), - - /** - * Compaction read bytes periodically. - */ - COMPACT_READ_BYTES_PERIODIC((byte) 0x5C), - - /** - * Compaction read bytes TTL. - */ - COMPACT_READ_BYTES_TTL((byte) 0x5D), - - /** - * Compaction write bytes marked. - */ - COMPACT_WRITE_BYTES_MARKED((byte) 0x5E), - - /** - * Compaction write bytes periodically. - */ - COMPACT_WRITE_BYTES_PERIODIC((byte) 0x5F), - - /** - * Compaction write bytes TTL. - */ - COMPACT_WRITE_BYTES_TTL((byte) 0x60), - - /** - * Number of table's properties loaded directly from file, without creating table reader object. - */ - NUMBER_DIRECT_LOAD_TABLE_PROPERTIES((byte) 0x61), - - /** - * Number of supervision acquires. - */ - NUMBER_SUPERVERSION_ACQUIRES((byte) 0x62), - - /** - * Number of supervision releases. - */ - NUMBER_SUPERVERSION_RELEASES((byte) 0x63), - - /** - * Number of supervision cleanups. - */ - NUMBER_SUPERVERSION_CLEANUPS((byte) 0x64), - - /** - * Number of compressions executed. - */ - NUMBER_BLOCK_COMPRESSED((byte) 0x65), - - /** - * Number of decompressions executed. - */ - NUMBER_BLOCK_DECOMPRESSED((byte) 0x66), - - /** - * Number of input bytes (uncompressed) to compression for SST blocks that are stored compressed. - */ - BYTES_COMPRESSED_FROM((byte) 0x67), - - /** - * Number of output bytes (compressed) from compression for SST blocks that are stored compressed. - */ - BYTES_COMPRESSED_TO((byte) 0x68), - - /** - * Number of uncompressed bytes for SST blocks that are stored uncompressed because compression type is kNoCompression, or some error case caused compression not to run or produce an output. 
Index blocks are only counted if enable_index_compression is true. - */ - BYTES_COMPRESSION_BYPASSED((byte) 0x69), - - /** - * Number of input bytes (uncompressed) to compression for SST blocks that are stored uncompressed because the compression result was rejected, either because the ratio was not acceptable (see CompressionOptions::max_compressed_bytes_per_kb) or found invalid by the `verify_compression` option. - */ - BYTES_COMPRESSION_REJECTED((byte) 0x6A), - - /** - * Like {@link #BYTES_COMPRESSION_BYPASSED} but counting number of blocks. - */ - NUMBER_BLOCK_COMPRESSION_BYPASSED((byte) 0x6B), - - /** - * Like {@link #BYTES_COMPRESSION_REJECTED} but counting number of blocks. - */ - NUMBER_BLOCK_COMPRESSION_REJECTED((byte) 0x6C), - - /** - * Number of input bytes (compressed) to decompression in reading compressed SST blocks from storage. - */ - BYTES_DECOMPRESSED_FROM((byte) 0x6D), - - /** - * Number of output bytes (uncompressed) from decompression in reading compressed SST blocks from storage. - */ - BYTES_DECOMPRESSED_TO((byte) 0x6E), - - /** - * Merge operations cumulative time. - */ - MERGE_OPERATION_TOTAL_TIME((byte) 0x6F), - - - /** - * Filter operations cumulative time. - */ - FILTER_OPERATION_TOTAL_TIME((byte) 0x70), - - /** - * Compaction CPU cumulative time. - */ - COMPACTION_CPU_TOTAL_TIME((byte) 0x71), - - /** - * Row cache hits. - */ - ROW_CACHE_HIT((byte) 0x72), - - /** - * Row cache misses. - */ - ROW_CACHE_MISS((byte) 0x73), - - /** - * Read amplification statistics. - * - * Read amplification can be calculated using this formula - * (READ_AMP_TOTAL_READ_BYTES / READ_AMP_ESTIMATE_USEFUL_BYTES) - * - * REQUIRES: ReadOptions::read_amp_bytes_per_bit to be enabled - */ - - /** - * Estimate of total bytes actually used. - */ - READ_AMP_ESTIMATE_USEFUL_BYTES((byte) 0x74), - - /** - * Total size of loaded data blocks. - */ - READ_AMP_TOTAL_READ_BYTES((byte) 0x75), - - /** - * Number of refill intervals where rate limiter's bytes are fully consumed. 
- */ - NUMBER_RATE_LIMITER_DRAINS((byte) 0x76), - - /** - * BlobDB specific stats - * Number of Put/PutTTL/PutUntil to BlobDB. - */ - BLOB_DB_NUM_PUT((byte) 0x77), - - /** - * Number of Write to BlobDB. - */ - BLOB_DB_NUM_WRITE((byte) 0x78), - - /** - * Number of Get to BlobDB. - */ - BLOB_DB_NUM_GET((byte) 0x79), - - /** - * Number of MultiGet to BlobDB. - */ - BLOB_DB_NUM_MULTIGET((byte) 0x7A), - - /** - * Number of Seek/SeekToFirst/SeekToLast/SeekForPrev to BlobDB iterator. - */ - BLOB_DB_NUM_SEEK((byte) 0x7B), - - /** - * Number of Next to BlobDB iterator. - */ - BLOB_DB_NUM_NEXT((byte) 0x7C), - - /** - * Number of Prev to BlobDB iterator. - */ - BLOB_DB_NUM_PREV((byte) 0x7D), - - /** - * Number of keys written to BlobDB. - */ - BLOB_DB_NUM_KEYS_WRITTEN((byte) 0x7E), - - /** - * Number of keys read from BlobDB. - */ - BLOB_DB_NUM_KEYS_READ((byte) 0x7F), - - /** - * Number of bytes (key + value) written to BlobDB. - */ - BLOB_DB_BYTES_WRITTEN((byte) -0x1), - - /** - * Number of bytes (keys + value) read from BlobDB. - */ - BLOB_DB_BYTES_READ((byte) -0x2), - - /** - * Number of keys written by BlobDB as non-TTL inlined value. - */ - BLOB_DB_WRITE_INLINED((byte) -0x3), - - /** - * Number of keys written by BlobDB as TTL inlined value. - */ - BLOB_DB_WRITE_INLINED_TTL((byte) -0x4), - - /** - * Number of keys written by BlobDB as non-TTL blob value. - */ - BLOB_DB_WRITE_BLOB((byte) -0x5), - - /** - * Number of keys written by BlobDB as TTL blob value. - */ - BLOB_DB_WRITE_BLOB_TTL((byte) -0x6), - - /** - * Number of bytes written to blob file. - */ - BLOB_DB_BLOB_FILE_BYTES_WRITTEN((byte) -0x7), - - /** - * Number of bytes read from blob file. - */ - BLOB_DB_BLOB_FILE_BYTES_READ((byte) -0x8), - - /** - * Number of times a blob files being synced. - */ - BLOB_DB_BLOB_FILE_SYNCED((byte) -0x9), - - /** - * Number of blob index evicted from base DB by BlobDB compaction filter because - * of expiration. 
- */ - BLOB_DB_BLOB_INDEX_EXPIRED_COUNT((byte) -0xA), - - /** - * Size of blob index evicted from base DB by BlobDB compaction filter - * because of expiration. - */ - BLOB_DB_BLOB_INDEX_EXPIRED_SIZE((byte) -0xB), - - /** - * Number of blob index evicted from base DB by BlobDB compaction filter because - * of corresponding file deleted. - */ - BLOB_DB_BLOB_INDEX_EVICTED_COUNT((byte) -0xC), - - /** - * Size of blob index evicted from base DB by BlobDB compaction filter - * because of corresponding file deleted. - */ - BLOB_DB_BLOB_INDEX_EVICTED_SIZE((byte) -0xD), - - /** - * Number of blob files being garbage collected. - */ - BLOB_DB_GC_NUM_FILES((byte) -0xE), - - /** - * Number of blob files generated by garbage collection. - */ - BLOB_DB_GC_NUM_NEW_FILES((byte) -0xF), - - /** - * Number of BlobDB garbage collection failures. - */ - BLOB_DB_GC_FAILURES((byte) -0x10), - - /** - * Number of keys relocated to new blob file by garbage collection. - */ - BLOB_DB_GC_NUM_KEYS_RELOCATED((byte) -0x11), - - /** - * Number of bytes relocated to new blob file by garbage collection. - */ - BLOB_DB_GC_BYTES_RELOCATED((byte) -0x12), - - /** - * Number of blob files evicted because of BlobDB is full. - */ - BLOB_DB_FIFO_NUM_FILES_EVICTED((byte) -0x13), - - /** - * Number of keys in the blob files evicted because of BlobDB is full. - */ - BLOB_DB_FIFO_NUM_KEYS_EVICTED((byte) -0x14), - - /** - * Number of bytes in the blob files evicted because of BlobDB is full. - */ - BLOB_DB_FIFO_BYTES_EVICTED((byte) -0x15), - - /** - * Number of times cache miss when accessing blob from blob cache. - */ - BLOB_DB_CACHE_MISS((byte) -0x16), - - /** - * Number of times cache hit when accessing blob from blob cache. - */ - BLOB_DB_CACHE_HIT((byte) -0x17), - - /** - * Number of data blocks added to blob cache. - */ - BLOB_DB_CACHE_ADD((byte) -0x18), - - /** - * Number of failures when adding blobs to blob cache. 
- */ - BLOB_DB_CACHE_ADD_FAILURES((byte) -0x19), - - /** - * Number of bytes read from blob cache. - */ - BLOB_DB_CACHE_BYTES_READ((byte) -0x1A), - - /** - * Number of bytes written into blob cache. - */ - BLOB_DB_CACHE_BYTES_WRITE((byte) -0x1B), - - /** - * These counters indicate a performance issue in WritePrepared transactions. - * We should not seem them ticking them much. - * Number of times prepare_mutex_ is acquired in the fast path. - */ - TXN_PREPARE_MUTEX_OVERHEAD((byte) -0x1C), - - /** - * Number of times old_commit_map_mutex_ is acquired in the fast path. - */ - TXN_OLD_COMMIT_MAP_MUTEX_OVERHEAD((byte) -0x1D), - - /** - * Number of times we checked a batch for duplicate keys. - */ - TXN_DUPLICATE_KEY_OVERHEAD((byte) -0x1E), - - /** - * Number of times snapshot_mutex_ is acquired in the fast path. - */ - TXN_SNAPSHOT_MUTEX_OVERHEAD((byte) -0x1F), - - /** - * Number of times ::Get returned TryAgain due to expired snapshot seq - */ - TXN_GET_TRY_AGAIN((byte) -0x20), - - /** - * Number of files marked as trash by delete scheduler - */ - FILES_MARKED_TRASH((byte) -0x21), - - /** - * Number of trash files deleted by the background thread from the trash queue - */ - FILES_DELETED_FROM_TRASH_QUEUE((byte) -0x22), - - /** - * Number of files deleted immediately by delete scheduler - */ - FILES_DELETED_IMMEDIATELY((byte) -0x23), - - /** - * DB error handler statistics - */ - ERROR_HANDLER_BG_ERROR_COUNT((byte) -0x24), - - /** - * Number of background errors handled by the error handler. - */ - ERROR_HANDLER_BG_IO_ERROR_COUNT((byte) -0x25), - - /** - * Number of retryable background I/O errors handled by the error handler. - * This is a subset of {@link #ERROR_HANDLER_BG_IO_ERROR_COUNT}. - */ - ERROR_HANDLER_BG_RETRYABLE_IO_ERROR_COUNT((byte) -0x26), - - /** - * Number of auto resumes handled by the error handler. - */ - ERROR_HANDLER_AUTORESUME_COUNT((byte) -0x27), - - /** - * Total Number of auto resume retries handled by the error handler. 
- */ - ERROR_HANDLER_AUTORESUME_RETRY_TOTAL_COUNT((byte) -0x28), - - /** - * Number of auto resumes that succeded that were handled by the error handler. - */ - ERROR_HANDLER_AUTORESUME_SUCCESS_COUNT((byte) -0x29), - - /** - * Bytes of raw data (payload) found on memtable at flush time. - * Contains the sum of garbage payload (bytes that are discarded - * at flush time) and useful payload (bytes of data that will - * eventually be written to SSTable). - */ - MEMTABLE_PAYLOAD_BYTES_AT_FLUSH((byte) -0x2A), - - /** - * Outdated bytes of data present on memtable at flush time. - */ - MEMTABLE_GARBAGE_BYTES_AT_FLUSH((byte) -0x2B), - - /** - * Bytes read by `VerifyChecksum()` and `VerifyFileChecksums()` APIs. - */ - VERIFY_CHECKSUM_READ_BYTES((byte) -0x2C), - - /** - * Bytes read whilst creating backups. - */ - BACKUP_READ_BYTES((byte) -0x2D), - - /** - * Bytes written whilst creating backups. - */ - BACKUP_WRITE_BYTES((byte) -0x2E), - - /** - * Remote compaction bytes read. - */ - REMOTE_COMPACT_READ_BYTES((byte) -0x2F), - - /** - * Remote compaction bytes written. - */ - REMOTE_COMPACT_WRITE_BYTES((byte) -0x30), - - /** - * Bytes read from hot files. - */ - HOT_FILE_READ_BYTES((byte) -0x31), - - /** - * Bytes read from warm files. - */ - WARM_FILE_READ_BYTES((byte) -0x32), - - /** - * Bytes read from cool files. - */ - COOL_FILE_READ_BYTES((byte) -0x5B), - - /** - * Bytes read from cold files. - */ - COLD_FILE_READ_BYTES((byte) -0x33), - - /** - * Bytes read from ice cold files. - */ - ICE_FILE_READ_BYTES((byte) -0x59), - - /** - * Numer of reads from hot files. - */ - HOT_FILE_READ_COUNT((byte) -0x34), - - /** - * Numer of reads from warm files. - */ - WARM_FILE_READ_COUNT((byte) -0x35), - - /** - * Numer of reads from cool files. - */ - COOL_FILE_READ_COUNT((byte) -0x5C), - - /** - * Numer of reads from cold files. - */ - COLD_FILE_READ_COUNT((byte) -0x36), - - /** - * Numer of reads from ice cold files. 
- */ - ICE_FILE_READ_COUNT((byte) -0x5A), - - /** - * Bytes read from the last level. - */ - LAST_LEVEL_READ_BYTES((byte) -0x37), - - /** - * Number of reads from the last level. - */ - LAST_LEVEL_READ_COUNT((byte) -0x38), - - /** - * Bytes read from the non-last level. - */ - NON_LAST_LEVEL_READ_BYTES((byte) -0x39), - - /** - * Number of reads from the non-last level. - */ - NON_LAST_LEVEL_READ_COUNT((byte) -0x3A), - - /** - * Statistics on iterator Seek() (and variants) for each sorted run. - * i.e a single user Seek() can result in many sorted run Seek()s. - * The stats are split between last level and non-last level. - * Filtered: a filter such as prefix Bloom filter indicate the Seek() would - * not find anything relevant, so avoided a likely access to data+index - * blocks. - */ - - LAST_LEVEL_SEEK_FILTERED((byte) -0x3B), - /** - * Filter match: a filter such as prefix Bloom filter was queried but did - * not filter out the seek. - */ - LAST_LEVEL_SEEK_FILTER_MATCH((byte) -0x3C), - - /** - * At least one data block was accessed for a Seek() (or variant) on a - * sorted run. - */ - LAST_LEVEL_SEEK_DATA((byte) -0x3D), - - /** - * At least one value() was accessed for the seek (suggesting it was useful), - * and no filter such as prefix Bloom was queried. - */ - LAST_LEVEL_SEEK_DATA_USEFUL_NO_FILTER((byte) -0x3E), - - /** - * At least one value() was accessed for the seek (suggesting it was useful), - * after querying a filter such as prefix Bloom. - */ - LAST_LEVEL_SEEK_DATA_USEFUL_FILTER_MATCH((byte) -0x3F), - - /** - * Similar to {@link #LAST_LEVEL_SEEK_FILTERED} but for the non-last level. - */ - NON_LAST_LEVEL_SEEK_FILTERED((byte) -0x40), - - /** - * Similar to {@link #LAST_LEVEL_SEEK_FILTER_MATCH} but for the non-last level. - */ - NON_LAST_LEVEL_SEEK_FILTER_MATCH((byte) -0x41), - - /** - * Similar to {@link #LAST_LEVEL_SEEK_DATA} but for the non-last level. 
- */ - NON_LAST_LEVEL_SEEK_DATA((byte) -0x42), - - /** - * Similar to {@link #LAST_LEVEL_SEEK_DATA_USEFUL_NO_FILTER} but for the non-last level. - */ - NON_LAST_LEVEL_SEEK_DATA_USEFUL_NO_FILTER((byte) -0x43), - - /** - * Similar to {@link #LAST_LEVEL_SEEK_DATA_USEFUL_FILTER_MATCH} but for the non-last level. - */ - NON_LAST_LEVEL_SEEK_DATA_USEFUL_FILTER_MATCH((byte) -0x44), - - /** - * Number of block checksum verifications. - */ - BLOCK_CHECKSUM_COMPUTE_COUNT((byte) -0x45), - - /** - * Number of times RocksDB detected a corruption while verifying a block - * checksum. RocksDB does not remember corruptions that happened during user - * reads so the same block corruption may be detected multiple times. - */ - BLOCK_CHECKSUM_MISMATCH_COUNT((byte) -0x46), - - /** - * Number of multiget co-rountines. - */ - MULTIGET_COROUTINE_COUNT((byte) -0x47), - - /** - * Time spent in the ReadAsync file system call. - */ - READ_ASYNC_MICROS((byte) -0x48), - - /** - * Number of errors returned to the async read callback. - */ - ASYNC_READ_ERROR_COUNT((byte) -0x49), - - /** - * Number of lookup into the prefetched tail (see - * `TABLE_OPEN_PREFETCH_TAIL_READ_BYTES`) - * that can't find its data for table open - */ - TABLE_OPEN_PREFETCH_TAIL_MISS((byte) -0x4A), - - /** - * Number of lookup into the prefetched tail (see - * `TABLE_OPEN_PREFETCH_TAIL_READ_BYTES`) - * that finds its data for table open - */ - TABLE_OPEN_PREFETCH_TAIL_HIT((byte) -0x4B), - - /** - * Number of times timestamps are checked on accessing the table - */ - TIMESTAMP_FILTER_TABLE_CHECKED((byte) -0x4C), - - /** - * Number of times timestamps can successfully help skip the table access - */ - TIMESTAMP_FILTER_TABLE_FILTERED((byte) -0x4D), - - /** - * Number of times readahead is trimmed during scans when ReadOptions.auto_readahead_size is set. - */ - READAHEAD_TRIMMED((byte) -0x4E), - - /** - * Maximum size of the FIFO compactions. 
- */ - FIFO_MAX_SIZE_COMPACTIONS((byte) -0x4F), - - /** - * TTL of the FIFO compactions. - */ - FIFO_TTL_COMPACTIONS((byte) -0x50), - - /** - * Change temperature of the FIFO compactions. - */ - FIFO_CHANGE_TEMPERATURE_COMPACTIONS((byte) -0x58), - - /** - * Number of bytes prefetched during user initiated scan. - */ - PREFETCH_BYTES((byte) -0x51), - - /** - * Number of prefetched bytes that were actually useful during user initiated scan. - */ - PREFETCH_BYTES_USEFUL((byte) -0x52), - - /** - * Number of FS reads avoided due to prefetching during user initiated scan. - */ - PREFETCH_HITS((byte) -0x53), - - /** - * Footer corruption detected when opening an SST file for reading. - */ - SST_FOOTER_CORRUPTION_COUNT((byte) -0x55), - - /** - * Counters for file read retries with the verify_and_reconstruct_read file system option after detecting a checksum mismatch. - */ - FILE_READ_CORRUPTION_RETRY_COUNT((byte) -0x56), - - /** - * Counters for file read retries with the verify_and_reconstruct_read file system option after detecting a checksum mismatch. - */ - FILE_READ_CORRUPTION_RETRY_SUCCESS_COUNT((byte) -0x57), - - /** - * Counter for the number of times a WBWI is ingested into the DB. This - * happens when IngestWriteBatchWithIndex() is used and when large - * transaction optimization is enabled through - * TransactionOptions::large_txn_commit_optimize_threshold. - */ - NUMBER_WBWI_INGEST((byte) -0x5D), - - /** - * Failure to load the UDI during SST table open. - */ - SST_USER_DEFINED_INDEX_LOAD_FAIL_COUNT((byte) -0x5E), - - /** - * Bytes of output files successfully resumed during remote compaction. - */ - REMOTE_COMPACT_RESUMED_BYTES((byte) -0x5F), - - /** - * MultiScan statistics - */ - - /** - * Number of calls to Iterator::Prepare() for multi-scan. - */ - MULTISCAN_PREPARE_CALLS((byte) -0x60), - - /** - * Number of errors during Iterator::Prepare() for multi-scan. 
- */ - MULTISCAN_PREPARE_ERRORS((byte) -0x61), - - /** - * Number of data blocks prefetched during multi-scan Prepare(). - */ - MULTISCAN_BLOCKS_PREFETCHED((byte) -0x62), - - /** - * Number of data blocks found in cache during multi-scan Prepare(). - */ - MULTISCAN_BLOCKS_FROM_CACHE((byte) -0x63), - - /** - * Total bytes prefetched during multi-scan Prepare(). - */ - MULTISCAN_PREFETCH_BYTES((byte) -0x64), - - /** - * Number of prefetched blocks that were never accessed (wasted). - */ - MULTISCAN_PREFETCH_BLOCKS_WASTED((byte) -0x65), - - /** - * Number of I/O requests issued during multi-scan Prepare(). - */ - MULTISCAN_IO_REQUESTS((byte) -0x66), - - /** - * Number of non-adjacent blocks coalesced into single I/O request. - */ - MULTISCAN_IO_COALESCED_NONADJACENT((byte) -0x67), - - /** - * Number of seek errors during multi-scan iteration. - */ - MULTISCAN_SEEK_ERRORS((byte) -0x68), - - /** - * Maximum number of ticker types. - */ - TICKER_ENUM_MAX((byte) -0x54); - - private final byte value; - - TickerType(final byte value) { - this.value = value; - } + /** + * total block cache misses + *

+ * REQUIRES: BLOCK_CACHE_MISS == BLOCK_CACHE_INDEX_MISS + + * BLOCK_CACHE_FILTER_MISS + + * BLOCK_CACHE_DATA_MISS; + */ + BLOCK_CACHE_MISS((byte) 0x0), + + /** + * total block cache hit + *

+ * REQUIRES: BLOCK_CACHE_HIT == BLOCK_CACHE_INDEX_HIT + + * BLOCK_CACHE_FILTER_HIT + + * BLOCK_CACHE_DATA_HIT; + */ + BLOCK_CACHE_HIT((byte) 0x1), + + BLOCK_CACHE_ADD((byte) 0x2), + + /** + * Number of failures when adding blocks to block cache. + */ + BLOCK_CACHE_ADD_FAILURES((byte) 0x3), + + /** + * Number of times cache miss when accessing index block from block cache. + */ + BLOCK_CACHE_INDEX_MISS((byte) 0x4), + + /** + * Number of times cache hit when accessing index block from block cache. + */ + BLOCK_CACHE_INDEX_HIT((byte) 0x5), + + /** + * Number of index blocks added to block cache. + */ + BLOCK_CACHE_INDEX_ADD((byte) 0x6), + + /** + * Number of bytes of index blocks inserted into cache + */ + BLOCK_CACHE_INDEX_BYTES_INSERT((byte) 0x7), + + /** + * Number of times cache miss when accessing filter block from block cache. + */ + BLOCK_CACHE_FILTER_MISS((byte) 0x8), + + /** + * Number of times cache hit when accessing filter block from block cache. + */ + BLOCK_CACHE_FILTER_HIT((byte) 0x9), + + /** + * Number of filter blocks added to block cache. + */ + BLOCK_CACHE_FILTER_ADD((byte) 0xA), + + /** + * Number of bytes of bloom filter blocks inserted into cache + */ + BLOCK_CACHE_FILTER_BYTES_INSERT((byte) 0xB), + + /** + * Number of times cache miss when accessing data block from block cache. + */ + BLOCK_CACHE_DATA_MISS((byte) 0xC), + + /** + * Number of times cache hit when accessing data block from block cache. + */ + BLOCK_CACHE_DATA_HIT((byte) 0xD), + + /** + * Number of data blocks added to block cache. + */ + BLOCK_CACHE_DATA_ADD((byte) 0xE), + + /** + * Number of bytes of data blocks inserted into cache + */ + BLOCK_CACHE_DATA_BYTES_INSERT((byte) 0xF), + + /** + * Number of bytes read from cache. + */ + BLOCK_CACHE_BYTES_READ((byte) 0x10), + + /** + * Number of bytes written into cache. + */ + BLOCK_CACHE_BYTES_WRITE((byte) 0x11), + + /** + * Number of Block cache Compression dictionary misses. 
+ */ + BLOCK_CACHE_COMPRESSION_DICT_MISS((byte) 0x12), + + /** + * Number of Block cache Compression dictionary hits. + */ + BLOCK_CACHE_COMPRESSION_DICT_HIT((byte) 0x13), + + /** + * Number of Block cache Compression dictionary additions. + */ + BLOCK_CACHE_COMPRESSION_DICT_ADD((byte) 0x14), + + /** + * Number of Block cache Compression dictionary bytes inserted. + */ + BLOCK_CACHE_COMPRESSION_DICT_BYTES_INSERT((byte) 0x15), + + /** + * Redundant additions to block cache. + */ + BLOCK_CACHE_ADD_REDUNDANT((byte) 0x16), + + /** + * Redundant additions to block cache index. + */ + BLOCK_CACHE_INDEX_ADD_REDUNDANT((byte) 0x17), + + /** + * Redundant additions to block cache filter. + */ + BLOCK_CACHE_FILTER_ADD_REDUNDANT((byte) 0x18), + + /** + * Redundant additions to block cache data. + */ + BLOCK_CACHE_DATA_ADD_REDUNDANT((byte) 0x19), + + /** + * Redundant additions to block cache compression dictionary. + */ + BLOCK_CACHE_COMPRESSION_DICT_ADD_REDUNDANT((byte) 0x1A), + + /** + * Number of secondary cache hits. + */ + SECONDARY_CACHE_HITS((byte) 0x1B), + + /** + * Number of secondary cache filter hits. + */ + SECONDARY_CACHE_FILTER_HITS((byte) 0x1C), + + /** + * Number of secondary cache index hits. + */ + SECONDARY_CACHE_INDEX_HITS((byte) 0x1D), + + /** + * Number of secondary cache data hits. + */ + SECONDARY_CACHE_DATA_HITS((byte) 0x1E), + + /** + * Number of compressed secondary cache dummy hits. + */ + COMPRESSED_SECONDARY_CACHE_DUMMY_HITS((byte) 0x1F), + + /** + * Number of compressed secondary cache hits. + */ + COMPRESSED_SECONDARY_CACHE_HITS((byte) 0x20), + + /** + * Number of compressed secondary cache promotions. + */ + COMPRESSED_SECONDARY_CACHE_PROMOTIONS((byte) 0x21), + + /** + * Number of compressed secondary cache promotion skips. + */ + COMPRESSED_SECONDARY_CACHE_PROMOTION_SKIPS((byte) 0x22), + + /** + * Number of times bloom filter has avoided file reads. 
+ */ + BLOOM_FILTER_USEFUL((byte) 0x23), + + /** + * Number of times bloom FullFilter has not avoided the reads. + */ + BLOOM_FILTER_FULL_POSITIVE((byte) 0x24), + + /** + * Number of times bloom FullFilter has not avoided the reads and data actually + * exist. + */ + BLOOM_FILTER_FULL_TRUE_POSITIVE((byte) 0x25), + + /** + * Number of times bloom was checked before creating iterator on a file. + */ + BLOOM_FILTER_PREFIX_CHECKED((byte) 0x26), + + /** + * Number of times it was useful (in avoiding iterator creation) that bloom was checked before + * creating iterator on a file. + */ + BLOOM_FILTER_PREFIX_USEFUL((byte) 0x27), + + /** + * Number of times bloom produced a true positive result. + */ + BLOOM_FILTER_PREFIX_TRUE_POSITIVE((byte) 0x28), + + /** + * Number of persistent cache hit + */ + PERSISTENT_CACHE_HIT((byte) 0x29), + + /** + * Number of persistent cache miss + */ + PERSISTENT_CACHE_MISS((byte) 0x2A), + + /** + * Number of total simulation block cache hits + */ + SIM_BLOCK_CACHE_HIT((byte) 0x2B), + + /** + * Number of total simulation block cache misses + */ + SIM_BLOCK_CACHE_MISS((byte) 0x2C), + + /** + * Number of memtable hits. + */ + MEMTABLE_HIT((byte) 0x2D), + + /** + * Number of of memtable misses. + */ + MEMTABLE_MISS((byte) 0x2E), + + /** + * Number of Get() queries served by L0 + */ + GET_HIT_L0((byte) 0x2F), + + /** + * Number of Get() queries served by L1 + */ + GET_HIT_L1((byte) 0x30), + + /** + * Number of Get() queries served by L2 and up + */ + GET_HIT_L2_AND_UP((byte) 0x31), + + /** + * COMPACTION_KEY_DROP_* count the reasons for key drop during compaction + * There are 4 reasons currently. + */ + + /** + * key was written with a newer value. + */ + COMPACTION_KEY_DROP_NEWER_ENTRY((byte) 0x32), + + /** + * Also includes keys dropped for range del. + * The key is obsolete. + */ + COMPACTION_KEY_DROP_OBSOLETE((byte) 0x33), + + /** + * key was covered by a range tombstone. 
+ */ + COMPACTION_KEY_DROP_RANGE_DEL((byte) 0x34), + + /** + * User compaction function has dropped the key. + */ + COMPACTION_KEY_DROP_USER((byte) 0x35), + + /** + * all keys in range were deleted. + */ + COMPACTION_RANGE_DEL_DROP_OBSOLETE((byte) 0x36), + + /** + * Deletions obsoleted before bottom level due to file gap optimization. + */ + COMPACTION_OPTIMIZED_DEL_DROP_OBSOLETE((byte) 0x37), + + /** + * Compactions cancelled to prevent ENOSPC + */ + COMPACTION_CANCELLED((byte) 0x38), + + /** + * Number of keys written to the database via the Put and Write call's. + */ + NUMBER_KEYS_WRITTEN((byte) 0x39), + + /** + * Number of Keys read. + */ + NUMBER_KEYS_READ((byte) 0x3A), + + /** + * Number keys updated, if inplace update is enabled + */ + NUMBER_KEYS_UPDATED((byte) 0x3B), + + /** + * The number of uncompressed bytes issued by DB::Put(), DB::Delete(),\ + * DB::Merge(), and DB::Write(). + */ + BYTES_WRITTEN((byte) 0x3C), + + /** + * The number of uncompressed bytes read from DB::Get(). It could be + * either from memtables, cache, or table files. + * + * For the number of logical bytes read from DB::MultiGet(), + * please use {@link #NUMBER_MULTIGET_BYTES_READ}. + */ + BYTES_READ((byte) 0x3D), + + /** + * The number of calls to seek. + */ + NUMBER_DB_SEEK((byte) 0x3E), + + /** + * The number of calls to next. + */ + NUMBER_DB_NEXT((byte) 0x3F), + + /** + * The number of calls to prev. + */ + NUMBER_DB_PREV((byte) 0x40), + + /** + * The number of calls to seek that returned data. + */ + NUMBER_DB_SEEK_FOUND((byte) 0x41), + + /** + * The number of calls to next that returned data. + */ + NUMBER_DB_NEXT_FOUND((byte) 0x42), + + /** + * The number of calls to prev that returned data. + */ + NUMBER_DB_PREV_FOUND((byte) 0x43), + + /** + * The number of uncompressed bytes read from an iterator. + * Includes size of key and value. 
+ */ + ITER_BYTES_READ((byte) 0x44), + + /** + * Number of internal skipped during iteration + */ + NUMBER_ITER_SKIP((byte) 0x45), + + /** + * Number of times we had to reseek inside an iteration to skip + * over large number of keys with same userkey. + */ + NUMBER_OF_RESEEKS_IN_ITERATION((byte) 0x46), + + /** + * Number of iterators created. + */ + NO_ITERATOR_CREATED((byte) 0x47), + + /** + * Number of iterators deleted. + */ + NO_ITERATOR_DELETED((byte) 0x48), + + /** + * Number of file opens. + */ + NO_FILE_OPENS((byte) 0x49), + + /** + * Number of file errors. + */ + NO_FILE_ERRORS((byte) 0x4A), + + /** + * Writer has to wait for compaction or flush to finish. + */ + STALL_MICROS((byte) 0x4B), + + /** + * The wait time for db mutex. + * + * Disabled by default. To enable it set stats level to {@link StatsLevel#ALL} + */ + DB_MUTEX_WAIT_MICROS((byte) 0x4C), + + /** + * Number of MultiGet calls. + */ + NUMBER_MULTIGET_CALLS((byte) 0x4D), + + /** + * Number of MultiGet keys read. + */ + NUMBER_MULTIGET_KEYS_READ((byte) 0x4E), + + /** + * Number of MultiGet bytes read. + */ + NUMBER_MULTIGET_BYTES_READ((byte) 0x4F), + + /** + * Number of MultiGet keys found (vs number requested) + */ + NUMBER_MULTIGET_KEYS_FOUND((byte) 0x50), + + /** + * Number of Merge failures. + */ + NUMBER_MERGE_FAILURES((byte) 0x51), + + /** + * Record the number of calls to {@link RocksDB#getUpdatesSince(long)}. Useful to keep track of + * transaction log iterator refreshes. + */ + GET_UPDATES_SINCE_CALLS((byte) 0x52), + + /** + * Number of times WAL sync is done. + */ + WAL_FILE_SYNCED((byte) 0x53), + + /** + * Number of bytes written to WAL. + */ + WAL_FILE_BYTES((byte) 0x54), + + /** + * Writes can be processed by requesting thread or by the thread at the + * head of the writers queue. + */ + WRITE_DONE_BY_SELF((byte) 0x55), + + /** + * Equivalent to writes done for others. + */ + WRITE_DONE_BY_OTHER((byte) 0x56), + + /** + * Number of Write calls that request WAL. 
+ */ + WRITE_WITH_WAL((byte) 0x57), + + /** + * Bytes read during compaction. + */ + COMPACT_READ_BYTES((byte) 0x58), + + /** + * Bytes written during compaction. + */ + COMPACT_WRITE_BYTES((byte) 0x59), + + /** + * Bytes written during flush. + */ + FLUSH_WRITE_BYTES((byte) 0x5A), + + /** + * Compaction read bytes marked. + */ + COMPACT_READ_BYTES_MARKED((byte) 0x5B), + + /** + * Compaction read bytes periodically. + */ + COMPACT_READ_BYTES_PERIODIC((byte) 0x5C), + + /** + * Compaction read bytes TTL. + */ + COMPACT_READ_BYTES_TTL((byte) 0x5D), + + /** + * Compaction write bytes marked. + */ + COMPACT_WRITE_BYTES_MARKED((byte) 0x5E), + + /** + * Compaction write bytes periodically. + */ + COMPACT_WRITE_BYTES_PERIODIC((byte) 0x5F), + + /** + * Compaction write bytes TTL. + */ + COMPACT_WRITE_BYTES_TTL((byte) 0x60), + + /** + * Number of table's properties loaded directly from file, without creating table reader object. + */ + NUMBER_DIRECT_LOAD_TABLE_PROPERTIES((byte) 0x61), + + /** + * Number of supervision acquires. + */ + NUMBER_SUPERVERSION_ACQUIRES((byte) 0x62), + + /** + * Number of supervision releases. + */ + NUMBER_SUPERVERSION_RELEASES((byte) 0x63), + + /** + * Number of supervision cleanups. + */ + NUMBER_SUPERVERSION_CLEANUPS((byte) 0x64), + + /** + * Number of compressions executed. + */ + NUMBER_BLOCK_COMPRESSED((byte) 0x65), + + /** + * Number of decompressions executed. + */ + NUMBER_BLOCK_DECOMPRESSED((byte) 0x66), + + /** + * Number of input bytes (uncompressed) to compression for SST blocks that are stored compressed. + */ + BYTES_COMPRESSED_FROM((byte) 0x67), + + /** + * Number of output bytes (compressed) from compression for SST blocks that are stored compressed. + */ + BYTES_COMPRESSED_TO((byte) 0x68), + + /** + * Number of uncompressed bytes for SST blocks that are stored uncompressed because compression + * type is kNoCompression, or some error case caused compression not to run or produce an output. 
+ * Index blocks are only counted if enable_index_compression is true. + */ + BYTES_COMPRESSION_BYPASSED((byte) 0x69), + + /** + * Number of input bytes (uncompressed) to compression for SST blocks that are stored uncompressed + * because the compression result was rejected, either because the ratio was not acceptable (see + * CompressionOptions::max_compressed_bytes_per_kb) or found invalid by the `verify_compression` + * option. + */ + BYTES_COMPRESSION_REJECTED((byte) 0x6A), + + /** + * Like {@link #BYTES_COMPRESSION_BYPASSED} but counting number of blocks. + */ + NUMBER_BLOCK_COMPRESSION_BYPASSED((byte) 0x6B), + + /** + * Like {@link #BYTES_COMPRESSION_REJECTED} but counting number of blocks. + */ + NUMBER_BLOCK_COMPRESSION_REJECTED((byte) 0x6C), + + /** + * Number of input bytes (compressed) to decompression in reading compressed SST blocks from + * storage. + */ + BYTES_DECOMPRESSED_FROM((byte) 0x6D), + + /** + * Number of output bytes (uncompressed) from decompression in reading compressed SST blocks from + * storage. + */ + BYTES_DECOMPRESSED_TO((byte) 0x6E), + + /** + * Merge operations cumulative time. + */ + MERGE_OPERATION_TOTAL_TIME((byte) 0x6F), + + /** + * Filter operations cumulative time. + */ + FILTER_OPERATION_TOTAL_TIME((byte) 0x70), + + /** + * Compaction CPU cumulative time. + */ + COMPACTION_CPU_TOTAL_TIME((byte) 0x71), + + /** + * Row cache hits. + */ + ROW_CACHE_HIT((byte) 0x72), + + /** + * Row cache misses. + */ + ROW_CACHE_MISS((byte) 0x73), + + /** + * Read amplification statistics. + * + * Read amplification can be calculated using this formula + * (READ_AMP_TOTAL_READ_BYTES / READ_AMP_ESTIMATE_USEFUL_BYTES) + * + * REQUIRES: ReadOptions::read_amp_bytes_per_bit to be enabled + */ + + /** + * Estimate of total bytes actually used. + */ + READ_AMP_ESTIMATE_USEFUL_BYTES((byte) 0x74), + + /** + * Total size of loaded data blocks. 
+ */ + READ_AMP_TOTAL_READ_BYTES((byte) 0x75), + + /** + * Number of refill intervals where rate limiter's bytes are fully consumed. + */ + NUMBER_RATE_LIMITER_DRAINS((byte) 0x76), + + /** + * BlobDB specific stats + * Number of Put/PutTTL/PutUntil to BlobDB. + */ + BLOB_DB_NUM_PUT((byte) 0x77), + + /** + * Number of Write to BlobDB. + */ + BLOB_DB_NUM_WRITE((byte) 0x78), + + /** + * Number of Get to BlobDB. + */ + BLOB_DB_NUM_GET((byte) 0x79), + + /** + * Number of MultiGet to BlobDB. + */ + BLOB_DB_NUM_MULTIGET((byte) 0x7A), + + /** + * Number of Seek/SeekToFirst/SeekToLast/SeekForPrev to BlobDB iterator. + */ + BLOB_DB_NUM_SEEK((byte) 0x7B), + + /** + * Number of Next to BlobDB iterator. + */ + BLOB_DB_NUM_NEXT((byte) 0x7C), + + /** + * Number of Prev to BlobDB iterator. + */ + BLOB_DB_NUM_PREV((byte) 0x7D), + + /** + * Number of keys written to BlobDB. + */ + BLOB_DB_NUM_KEYS_WRITTEN((byte) 0x7E), + + /** + * Number of keys read from BlobDB. + */ + BLOB_DB_NUM_KEYS_READ((byte) 0x7F), + + /** + * Number of bytes (key + value) written to BlobDB. + */ + BLOB_DB_BYTES_WRITTEN((byte) -0x1), + + /** + * Number of bytes (keys + value) read from BlobDB. + */ + BLOB_DB_BYTES_READ((byte) -0x2), + + /** + * Number of keys written by BlobDB as non-TTL inlined value. + */ + BLOB_DB_WRITE_INLINED((byte) -0x3), + + /** + * Number of keys written by BlobDB as TTL inlined value. + */ + BLOB_DB_WRITE_INLINED_TTL((byte) -0x4), + + /** + * Number of keys written by BlobDB as non-TTL blob value. + */ + BLOB_DB_WRITE_BLOB((byte) -0x5), + + /** + * Number of keys written by BlobDB as TTL blob value. + */ + BLOB_DB_WRITE_BLOB_TTL((byte) -0x6), + + /** + * Number of bytes written to blob file. + */ + BLOB_DB_BLOB_FILE_BYTES_WRITTEN((byte) -0x7), + + /** + * Number of bytes read from blob file. + */ + BLOB_DB_BLOB_FILE_BYTES_READ((byte) -0x8), + + /** + * Number of times a blob files being synced. 
+ */ + BLOB_DB_BLOB_FILE_SYNCED((byte) -0x9), + + /** + * Number of blob index evicted from base DB by BlobDB compaction filter because + * of expiration. + */ + BLOB_DB_BLOB_INDEX_EXPIRED_COUNT((byte) -0xA), + + /** + * Size of blob index evicted from base DB by BlobDB compaction filter + * because of expiration. + */ + BLOB_DB_BLOB_INDEX_EXPIRED_SIZE((byte) -0xB), + + /** + * Number of blob index evicted from base DB by BlobDB compaction filter because + * of corresponding file deleted. + */ + BLOB_DB_BLOB_INDEX_EVICTED_COUNT((byte) -0xC), + + /** + * Size of blob index evicted from base DB by BlobDB compaction filter + * because of corresponding file deleted. + */ + BLOB_DB_BLOB_INDEX_EVICTED_SIZE((byte) -0xD), + + /** + * Number of blob files being garbage collected. + */ + BLOB_DB_GC_NUM_FILES((byte) -0xE), + + /** + * Number of blob files generated by garbage collection. + */ + BLOB_DB_GC_NUM_NEW_FILES((byte) -0xF), + + /** + * Number of BlobDB garbage collection failures. + */ + BLOB_DB_GC_FAILURES((byte) -0x10), + + /** + * Number of keys relocated to new blob file by garbage collection. + */ + BLOB_DB_GC_NUM_KEYS_RELOCATED((byte) -0x11), + + /** + * Number of bytes relocated to new blob file by garbage collection. + */ + BLOB_DB_GC_BYTES_RELOCATED((byte) -0x12), + + /** + * Number of blob files evicted because of BlobDB is full. + */ + BLOB_DB_FIFO_NUM_FILES_EVICTED((byte) -0x13), + + /** + * Number of keys in the blob files evicted because of BlobDB is full. + */ + BLOB_DB_FIFO_NUM_KEYS_EVICTED((byte) -0x14), + + /** + * Number of bytes in the blob files evicted because of BlobDB is full. + */ + BLOB_DB_FIFO_BYTES_EVICTED((byte) -0x15), + + /** + * Number of times cache miss when accessing blob from blob cache. + */ + BLOB_DB_CACHE_MISS((byte) -0x16), + + /** + * Number of times cache hit when accessing blob from blob cache. + */ + BLOB_DB_CACHE_HIT((byte) -0x17), + + /** + * Number of data blocks added to blob cache. 
+ */ + BLOB_DB_CACHE_ADD((byte) -0x18), + + /** + * Number of failures when adding blobs to blob cache. + */ + BLOB_DB_CACHE_ADD_FAILURES((byte) -0x19), + + /** + * Number of bytes read from blob cache. + */ + BLOB_DB_CACHE_BYTES_READ((byte) -0x1A), + + /** + * Number of bytes written into blob cache. + */ + BLOB_DB_CACHE_BYTES_WRITE((byte) -0x1B), + + /** + * These counters indicate a performance issue in WritePrepared transactions. + * We should not seem them ticking them much. + * Number of times prepare_mutex_ is acquired in the fast path. + */ + TXN_PREPARE_MUTEX_OVERHEAD((byte) -0x1C), + + /** + * Number of times old_commit_map_mutex_ is acquired in the fast path. + */ + TXN_OLD_COMMIT_MAP_MUTEX_OVERHEAD((byte) -0x1D), + + /** + * Number of times we checked a batch for duplicate keys. + */ + TXN_DUPLICATE_KEY_OVERHEAD((byte) -0x1E), + + /** + * Number of times snapshot_mutex_ is acquired in the fast path. + */ + TXN_SNAPSHOT_MUTEX_OVERHEAD((byte) -0x1F), + + /** + * Number of times ::Get returned TryAgain due to expired snapshot seq + */ + TXN_GET_TRY_AGAIN((byte) -0x20), + + /** + * Number of files marked as trash by delete scheduler + */ + FILES_MARKED_TRASH((byte) -0x21), + + /** + * Number of trash files deleted by the background thread from the trash queue + */ + FILES_DELETED_FROM_TRASH_QUEUE((byte) -0x22), + + /** + * Number of files deleted immediately by delete scheduler + */ + FILES_DELETED_IMMEDIATELY((byte) -0x23), + + /** + * DB error handler statistics + */ + ERROR_HANDLER_BG_ERROR_COUNT((byte) -0x24), + + /** + * Number of background errors handled by the error handler. + */ + ERROR_HANDLER_BG_IO_ERROR_COUNT((byte) -0x25), + + /** + * Number of retryable background I/O errors handled by the error handler. + * This is a subset of {@link #ERROR_HANDLER_BG_IO_ERROR_COUNT}. + */ + ERROR_HANDLER_BG_RETRYABLE_IO_ERROR_COUNT((byte) -0x26), + + /** + * Number of auto resumes handled by the error handler. 
+ */ + ERROR_HANDLER_AUTORESUME_COUNT((byte) -0x27), + + /** + * Total Number of auto resume retries handled by the error handler. + */ + ERROR_HANDLER_AUTORESUME_RETRY_TOTAL_COUNT((byte) -0x28), + + /** + * Number of auto resumes that succeded that were handled by the error handler. + */ + ERROR_HANDLER_AUTORESUME_SUCCESS_COUNT((byte) -0x29), + + /** + * Bytes of raw data (payload) found on memtable at flush time. + * Contains the sum of garbage payload (bytes that are discarded + * at flush time) and useful payload (bytes of data that will + * eventually be written to SSTable). + */ + MEMTABLE_PAYLOAD_BYTES_AT_FLUSH((byte) -0x2A), + + /** + * Outdated bytes of data present on memtable at flush time. + */ + MEMTABLE_GARBAGE_BYTES_AT_FLUSH((byte) -0x2B), + + /** + * Bytes read by `VerifyChecksum()` and `VerifyFileChecksums()` APIs. + */ + VERIFY_CHECKSUM_READ_BYTES((byte) -0x2C), + + /** + * Bytes read whilst creating backups. + */ + BACKUP_READ_BYTES((byte) -0x2D), + + /** + * Bytes written whilst creating backups. + */ + BACKUP_WRITE_BYTES((byte) -0x2E), + + /** + * Remote compaction bytes read. + */ + REMOTE_COMPACT_READ_BYTES((byte) -0x2F), + + /** + * Remote compaction bytes written. + */ + REMOTE_COMPACT_WRITE_BYTES((byte) -0x30), + + /** + * Bytes read from hot files. + */ + HOT_FILE_READ_BYTES((byte) -0x31), + + /** + * Bytes read from warm files. + */ + WARM_FILE_READ_BYTES((byte) -0x32), + + /** + * Bytes read from cool files. + */ + COOL_FILE_READ_BYTES((byte) -0x5B), + + /** + * Bytes read from cold files. + */ + COLD_FILE_READ_BYTES((byte) -0x33), + + /** + * Bytes read from ice cold files. + */ + ICE_FILE_READ_BYTES((byte) -0x59), + + /** + * Numer of reads from hot files. + */ + HOT_FILE_READ_COUNT((byte) -0x34), + + /** + * Numer of reads from warm files. + */ + WARM_FILE_READ_COUNT((byte) -0x35), + + /** + * Numer of reads from cool files. + */ + COOL_FILE_READ_COUNT((byte) -0x5C), + + /** + * Numer of reads from cold files. 
+ */ + COLD_FILE_READ_COUNT((byte) -0x36), + + /** + * Numer of reads from ice cold files. + */ + ICE_FILE_READ_COUNT((byte) -0x5A), + + /** + * Bytes read from the last level. + */ + LAST_LEVEL_READ_BYTES((byte) -0x37), + + /** + * Number of reads from the last level. + */ + LAST_LEVEL_READ_COUNT((byte) -0x38), + + /** + * Bytes read from the non-last level. + */ + NON_LAST_LEVEL_READ_BYTES((byte) -0x39), + + /** + * Number of reads from the non-last level. + */ + NON_LAST_LEVEL_READ_COUNT((byte) -0x3A), + + /** + * Statistics on iterator Seek() (and variants) for each sorted run. + * i.e a single user Seek() can result in many sorted run Seek()s. + * The stats are split between last level and non-last level. + * Filtered: a filter such as prefix Bloom filter indicate the Seek() would + * not find anything relevant, so avoided a likely access to data+index + * blocks. + */ + + LAST_LEVEL_SEEK_FILTERED((byte) -0x3B), + /** + * Filter match: a filter such as prefix Bloom filter was queried but did + * not filter out the seek. + */ + LAST_LEVEL_SEEK_FILTER_MATCH((byte) -0x3C), + + /** + * At least one data block was accessed for a Seek() (or variant) on a + * sorted run. + */ + LAST_LEVEL_SEEK_DATA((byte) -0x3D), + + /** + * At least one value() was accessed for the seek (suggesting it was useful), + * and no filter such as prefix Bloom was queried. + */ + LAST_LEVEL_SEEK_DATA_USEFUL_NO_FILTER((byte) -0x3E), + + /** + * At least one value() was accessed for the seek (suggesting it was useful), + * after querying a filter such as prefix Bloom. + */ + LAST_LEVEL_SEEK_DATA_USEFUL_FILTER_MATCH((byte) -0x3F), + + /** + * Similar to {@link #LAST_LEVEL_SEEK_FILTERED} but for the non-last level. + */ + NON_LAST_LEVEL_SEEK_FILTERED((byte) -0x40), + + /** + * Similar to {@link #LAST_LEVEL_SEEK_FILTER_MATCH} but for the non-last level. + */ + NON_LAST_LEVEL_SEEK_FILTER_MATCH((byte) -0x41), + + /** + * Similar to {@link #LAST_LEVEL_SEEK_DATA} but for the non-last level. 
+ */ + NON_LAST_LEVEL_SEEK_DATA((byte) -0x42), + + /** + * Similar to {@link #LAST_LEVEL_SEEK_DATA_USEFUL_NO_FILTER} but for the non-last level. + */ + NON_LAST_LEVEL_SEEK_DATA_USEFUL_NO_FILTER((byte) -0x43), + + /** + * Similar to {@link #LAST_LEVEL_SEEK_DATA_USEFUL_FILTER_MATCH} but for the non-last level. + */ + NON_LAST_LEVEL_SEEK_DATA_USEFUL_FILTER_MATCH((byte) -0x44), + + /** + * Number of block checksum verifications. + */ + BLOCK_CHECKSUM_COMPUTE_COUNT((byte) -0x45), + + /** + * Number of times RocksDB detected a corruption while verifying a block + * checksum. RocksDB does not remember corruptions that happened during user + * reads so the same block corruption may be detected multiple times. + */ + BLOCK_CHECKSUM_MISMATCH_COUNT((byte) -0x46), + + /** + * Number of multiget co-rountines. + */ + MULTIGET_COROUTINE_COUNT((byte) -0x47), + + /** + * Time spent in the ReadAsync file system call. + */ + READ_ASYNC_MICROS((byte) -0x48), + + /** + * Number of errors returned to the async read callback. + */ + ASYNC_READ_ERROR_COUNT((byte) -0x49), + + /** + * Number of lookup into the prefetched tail (see + * `TABLE_OPEN_PREFETCH_TAIL_READ_BYTES`) + * that can't find its data for table open + */ + TABLE_OPEN_PREFETCH_TAIL_MISS((byte) -0x4A), + + /** + * Number of lookup into the prefetched tail (see + * `TABLE_OPEN_PREFETCH_TAIL_READ_BYTES`) + * that finds its data for table open + */ + TABLE_OPEN_PREFETCH_TAIL_HIT((byte) -0x4B), + + /** + * Number of times timestamps are checked on accessing the table + */ + TIMESTAMP_FILTER_TABLE_CHECKED((byte) -0x4C), + + /** + * Number of times timestamps can successfully help skip the table access + */ + TIMESTAMP_FILTER_TABLE_FILTERED((byte) -0x4D), + + /** + * Number of times readahead is trimmed during scans when ReadOptions.auto_readahead_size is set. + */ + READAHEAD_TRIMMED((byte) -0x4E), + + /** + * Maximum size of the FIFO compactions. 
+ */ + FIFO_MAX_SIZE_COMPACTIONS((byte) -0x4F), + + /** + * TTL of the FIFO compactions. + */ + FIFO_TTL_COMPACTIONS((byte) -0x50), + + /** + * Change temperature of the FIFO compactions. + */ + FIFO_CHANGE_TEMPERATURE_COMPACTIONS((byte) -0x58), + + /** + * Number of bytes prefetched during user initiated scan. + */ + PREFETCH_BYTES((byte) -0x51), + + /** + * Number of prefetched bytes that were actually useful during user initiated scan. + */ + PREFETCH_BYTES_USEFUL((byte) -0x52), + + /** + * Number of FS reads avoided due to prefetching during user initiated scan. + */ + PREFETCH_HITS((byte) -0x53), + + /** + * Footer corruption detected when opening an SST file for reading. + */ + SST_FOOTER_CORRUPTION_COUNT((byte) -0x55), + + /** + * Counters for file read retries with the verify_and_reconstruct_read file system option after + * detecting a checksum mismatch. + */ + FILE_READ_CORRUPTION_RETRY_COUNT((byte) -0x56), + + /** + * Counters for file read retries with the verify_and_reconstruct_read file system option after + * detecting a checksum mismatch. + */ + FILE_READ_CORRUPTION_RETRY_SUCCESS_COUNT((byte) -0x57), + + /** + * Counter for the number of times a WBWI is ingested into the DB. This + * happens when IngestWriteBatchWithIndex() is used and when large + * transaction optimization is enabled through + * TransactionOptions::large_txn_commit_optimize_threshold. + */ + NUMBER_WBWI_INGEST((byte) -0x5D), + + /** + * Failure to load the UDI during SST table open. + */ + SST_USER_DEFINED_INDEX_LOAD_FAIL_COUNT((byte) -0x5E), + + /** + * Bytes of output files successfully resumed during remote compaction. + */ + REMOTE_COMPACT_RESUMED_BYTES((byte) -0x5F), + + /** + * MultiScan statistics + */ + + /** + * Number of calls to Iterator::Prepare() for multi-scan. + */ + MULTISCAN_PREPARE_CALLS((byte) -0x60), + + /** + * Number of errors during Iterator::Prepare() for multi-scan. 
+ */ + MULTISCAN_PREPARE_ERRORS((byte) -0x61), + + /** + * Number of data blocks prefetched during multi-scan Prepare(). + */ + MULTISCAN_BLOCKS_PREFETCHED((byte) -0x62), + + /** + * Number of data blocks found in cache during multi-scan Prepare(). + */ + MULTISCAN_BLOCKS_FROM_CACHE((byte) -0x63), + + /** + * Total bytes prefetched during multi-scan Prepare(). + */ + MULTISCAN_PREFETCH_BYTES((byte) -0x64), + + /** + * Number of prefetched blocks that were never accessed (wasted). + */ + MULTISCAN_PREFETCH_BLOCKS_WASTED((byte) -0x65), + + /** + * Number of I/O requests issued during multi-scan Prepare(). + */ + MULTISCAN_IO_REQUESTS((byte) -0x66), + + /** + * Number of non-adjacent blocks coalesced into single I/O request. + */ + MULTISCAN_IO_COALESCED_NONADJACENT((byte) -0x67), + + /** + * Number of seek errors during multi-scan iteration. + */ + MULTISCAN_SEEK_ERRORS((byte) -0x68), + + /** + * Maximum number of ticker types. + */ + TICKER_ENUM_MAX((byte) -0x54); + + private final byte value; + + TickerType(final byte value) { + this.value = value; + } /** * Returns the byte value of the enumerations value